def get_transform(point_cloud, is_training, bn_decay=None, K=3):
    """ Transform Net, input is a BxNx3 point cloud
        Return:
            Transformation matrix of size 3xK """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    input_image = tf.expand_dims(point_cloud, -1)
    net = tf_util.conv2d(input_image, 64, [1,3], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv4', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1], padding='VALID', scope='tmaxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='tfc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='tfc2', bn_decay=bn_decay)

    with tf.variable_scope('transform_XYZ') as sc:
        assert K == 3
        weights = tf.get_variable('weights', [128, 3*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32)
        biases = tf.get_variable('biases', [3*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32) + tf.constant([1,0,0,0,1,0,0,0,1], dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    #transform = tf_util.fully_connected(net, 3*K, activation_fn=None, scope='tfc3')
    transform = tf.reshape(transform, [batch_size, 3, K])
    return transform
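
# A minimal usage sketch, not part of the original listing: how the predicted
# 3x3 transform is typically applied to align the input cloud. The placeholder
# shapes (batch 32, 1024 points) are illustrative assumptions; tf and tf_util
# are assumed imported as in the snippets above.
point_cloud_pl = tf.placeholder(tf.float32, shape=(32, 1024, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())
with tf.variable_scope('transform_net1'):
    transform = get_transform(point_cloud_pl, is_training_pl, K=3)   # Bx3x3
point_cloud_aligned = tf.matmul(point_cloud_pl, transform)           # BxNx3 aligned cloud
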
def get_transform_K(inputs, is_training, bn_decay=None, K=3):
    """ Transform Net, input is a BxNx1xK feature map
        Return:
            Transformation matrix of size KxK """
    batch_size = inputs.get_shape()[0].value
    num_point = inputs.get_shape()[1].value

    net = tf_util.conv2d(inputs, 256, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv2', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1], padding='VALID', scope='tmaxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='tfc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='tfc2', bn_decay=bn_decay)

    with tf.variable_scope('transform_feat') as sc:
        weights = tf.get_variable('weights', [256, K*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32)
        biases = tf.get_variable('biases', [K*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32) + tf.constant(np.eye(K).flatten(), dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    #transform = tf_util.fully_connected(net, K*K, activation_fn=None, scope='tfc3')
    transform = tf.reshape(transform, [batch_size, K, K])
    return transform
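
# A companion sketch (an assumption mirroring the usual PointNet-style
# regularizer, not code from this listing): push the predicted KxK feature
# transform toward an orthogonal matrix, where `transform` is the BxKxK output
# of get_transform_K above; np and tf are assumed imported.
K_dim = transform.get_shape()[1].value
mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1])) \
           - tf.constant(np.eye(K_dim), dtype=tf.float32)   # A*A^T - I
mat_diff_loss = tf.nn.l2_loss(mat_diff)                      # add a weighted copy to the task loss
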
# Example #3
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
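
# A wiring sketch (assumptions: TF 1.x graph mode; batch 32 and 1024 points are
# illustrative; `get_model` is the classification network defined above).
point_cloud_pl = tf.placeholder(tf.float32, shape=(32, 1024, 3))
labels_pl = tf.placeholder(tf.int32, shape=(32,))
is_training_pl = tf.placeholder(tf.bool, shape=())

logits, end_points = get_model(point_cloud_pl, is_training_pl)
cls_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels_pl))
# end_points['transform'] can additionally be regularized toward orthogonality,
# as sketched after get_transform_K above.
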
def get_3d_box_estimation_v1_net(object_point_cloud, one_hot_vec,
                                 is_training, bn_decay, end_points):
    ''' 3D Box Estimation PointNet v1 network.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            point clouds in object coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)
            including box centers, heading bin class scores and residuals,
            and size cluster scores and residuals
    ''' 
    num_point = object_point_cloud.get_shape()[1].value
    net = tf.expand_dims(object_point_cloud, 2)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg4', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1],
        padding='VALID', scope='maxpool2')
    net = tf.squeeze(net, axis=[1,2])
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net, 512, scope='fc1', bn=True,
        is_training=is_training, bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, scope='fc2', bn=True,
        is_training=is_training, bn_decay=bn_decay)

    # The first 3 numbers: box center coordinates (cx,cy,cz),
    # the next NUM_HEADING_BIN*2: heading bin class scores and bin residuals,
    # the next NUM_SIZE_CLUSTER*4: size cluster scores and residuals
    output = tf_util.fully_connected(net,
        3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4, activation_fn=None, scope='fc3')
    return output, end_points
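
# A slicing sketch (an illustrative way to unpack the flat output, not code
# from this listing; NUM_HEADING_BIN and NUM_SIZE_CLUSTER are the constants
# referenced above, `output` is the tensor returned by the function).
center = output[:, 0:3]                                               # (B,3) box centers
heading_scores = output[:, 3:3 + NUM_HEADING_BIN]                     # (B,NUM_HEADING_BIN)
heading_residuals = output[:, 3 + NUM_HEADING_BIN:3 + NUM_HEADING_BIN * 2]
size_part = output[:, 3 + NUM_HEADING_BIN * 2:]                       # (B,NUM_SIZE_CLUSTER*4)
size_scores = size_part[:, :NUM_SIZE_CLUSTER]                         # cluster scores
size_residuals = tf.reshape(size_part[:, NUM_SIZE_CLUSTER:],
                            [-1, NUM_SIZE_CLUSTER, 3])                # per-cluster (dx,dy,dz) residuals
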
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    input_image = tf.expand_dims(point_cloud, -1)
    
    # Point functions (MLP implemented as conv2d)
    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')
    
    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
# Example #6
def get_center_regression_net(object_point_cloud, one_hot_vec,
                              is_training, bn_decay, end_points):
    ''' Regression network for center delta. a.k.a. T-Net.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            point clouds in 3D mask coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        predicted_center: TF tensor in shape (B,3)
    ''' 
    num_point = object_point_cloud.get_shape()[1].value
    net = tf.expand_dims(object_point_cloud, 2)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg1-stage1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg2-stage1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg3-stage1', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1],
        padding='VALID', scope='maxpool-stage1')
    net = tf.squeeze(net, axis=[1,2])
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net, 256, scope='fc1-stage1', bn=True,
        is_training=is_training, bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 128, scope='fc2-stage1', bn=True,
        is_training=is_training, bn_decay=bn_decay)
    predicted_center = tf_util.fully_connected(net, 3, activation_fn=None,
        scope='fc3-stage1')
    return predicted_center, end_points
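
# A usage sketch (assumptions: placeholder shapes are illustrative; bn_decay is
# passed as None here only for brevity): re-center the object cloud with the
# predicted center delta before a later box-estimation stage.
object_pc_pl = tf.placeholder(tf.float32, shape=(32, 512, 3))
one_hot_pl = tf.placeholder(tf.float32, shape=(32, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())
end_points = {}

center_delta, end_points = get_center_regression_net(
    object_pc_pl, one_hot_pl, is_training_pl, None, end_points)
# Subtract the (B,3) delta from every point so the next stage sees a centered cloud.
object_pc_centered = object_pc_pl - tf.expand_dims(center_delta, 1)
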
# Example #7
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output BxNx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    point_feat = tf.expand_dims(net_transformed, [2])
    print(point_feat)

    net = tf_util.conv2d(point_feat, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point,1],
                                     padding='VALID', scope='maxpool')
    print(global_feat)

    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])
    print(concat_feat)

    net = tf_util.conv2d(concat_feat, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv8', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv9', bn_decay=bn_decay)

    net = tf_util.conv2d(net, 50, [1,1],
                         padding='VALID', stride=[1,1], activation_fn=None,
                         scope='conv10')
    net = tf.squeeze(net, [2]) # BxNxC

    return net, end_points
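
# A loss sketch (assumptions: `net` is the BxNx50 per-point score tensor
# returned above; integer part labels of shape BxN are available; batch and
# point counts here are illustrative).
seg_labels_pl = tf.placeholder(tf.int32, shape=(32, 1024))
seg_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=net, labels=seg_labels_pl))
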
def get_instance_seg_v1_net(point_cloud, one_hot_vec,
                            is_training, bn_decay, end_points):
    ''' 3D instance segmentation PointNet v1 network.
    Input:
        point_cloud: TF tensor in shape (B,N,4)
            frustum point clouds with XYZ and intensity in point channels
            XYZs are in frustum coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
        is_training: TF boolean scalar
        bn_decay: TF float scalar
        end_points: dict
    Output:
        logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
        end_points: dict
    '''
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    net = tf.expand_dims(point_cloud, 2)

    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    point_feat = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(point_feat, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point,1],
                                     padding='VALID', scope='maxpool')

    global_feat = tf.concat([global_feat, tf.expand_dims(tf.expand_dims(one_hot_vec, 1), 1)], axis=3)
    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])

    net = tf_util.conv2d(concat_feat, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv8', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv9', bn_decay=bn_decay)
    net = tf_util.dropout(net, is_training, 'dp1', keep_prob=0.5)

    logits = tf_util.conv2d(net, 2, [1,1],
                         padding='VALID', stride=[1,1], activation_fn=None,
                         scope='conv10')
    logits = tf.squeeze(logits, [2]) # BxNxC
    return logits, end_points
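
# A post-processing sketch (assumption: `logits` is the (B,N,2) tensor returned
# above): per-point foreground decisions and a per-sample count of object points.
mask = tf.argmax(logits, axis=2)                      # 0 = clutter/background, 1 = object
mask = tf.cast(mask, tf.float32)                      # (B,N) {0,1} mask
num_object_points = tf.reduce_sum(mask, axis=1)       # object points per sample
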
# Example #9
def feature_transform_net(inputs, is_training, bn_decay=None, K=64):
    """ Feature Transform Net, input is BxNx1xK
        Return:
            Transformation matrix of size KxK """
    batch_size = inputs.get_shape()[0].value
    num_point = inputs.get_shape()[1].value

    net = tf_util.conv2d(inputs,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv3',
                         bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='tmaxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc1',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc2',
                                  bn_decay=bn_decay)

    with tf.variable_scope('transform_feat') as sc:
        weights = tf.get_variable('weights', [256, K * K],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [K * K],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant(np.eye(K).flatten(), dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    transform = tf.reshape(transform, [batch_size, K, K])
    return transform
# Example #10
def pointnet_sa_module(xyz,
                       points,
                       npoint,
                       radius,
                       nsample,
                       mlp,
                       mlp2,
                       group_all,
                       is_training,
                       bn_decay,
                       scope,
                       bn=True,
                       pooling='max',
                       tnet_spec=None,
                       knn=False,
                       use_xyz=True):
    ''' PointNet Set Abstraction (SA) Module
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: float32 -- search radius in local region
            nsample: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
            mlp2: list of int32 -- output size for MLP on each region
            group_all: bool -- group all points into one PC if set true, OVERRIDE
                npoint, radius and nsample settings
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, mlp[-1] or mlp2[-1]) TF tensor
            idx: (batch_size, npoint, nsample) int32 -- indices for local regions
    '''
    with tf.variable_scope(scope) as sc:
        if group_all:
            nsample = xyz.get_shape()[1].value
            new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(
                xyz, points, use_xyz)
        else:
            new_xyz, new_points, idx, grouped_xyz = sample_and_group(
                npoint, radius, nsample, xyz, points, tnet_spec, knn, use_xyz)
        for i, num_out_channel in enumerate(mlp):
            new_points = tf_util.conv2d(new_points,
                                        num_out_channel, [1, 1],
                                        padding='VALID',
                                        stride=[1, 1],
                                        bn=bn,
                                        is_training=is_training,
                                        scope='conv%d' % (i),
                                        bn_decay=bn_decay)
        if pooling == 'avg':
            new_points = tf_util.avg_pool2d(new_points, [1, nsample],
                                            stride=[1, 1],
                                            padding='VALID',
                                            scope='avgpool1')
        elif pooling == 'weighted_avg':
            with tf.variable_scope('weighted_avg1'):
                dists = tf.norm(grouped_xyz, axis=-1, ord=2, keep_dims=True)
                exp_dists = tf.exp(-dists * 5)
                weights = exp_dists / tf.reduce_sum(
                    exp_dists, axis=2,
                    keep_dims=True)  # (batch_size, npoint, nsample, 1)
                new_points *= weights  # (batch_size, npoint, nsample, mlp[-1])
                new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
        elif pooling == 'max':
            new_points = tf.reduce_max(new_points, axis=[2], keep_dims=True)
        elif pooling == 'min':
            new_points = tf_util.max_pool2d(-1 * new_points, [1, nsample],
                                            stride=[1, 1],
                                            padding='VALID',
                                            scope='minpool1')
        elif pooling == 'max_and_avg':
            max_points = tf_util.max_pool2d(new_points, [1, nsample],
                                            stride=[1, 1],
                                            padding='VALID',
                                            scope='maxpool1')
            avg_points = tf_util.avg_pool2d(new_points, [1, nsample],
                                            stride=[1, 1],
                                            padding='VALID',
                                            scope='avgpool1')
            new_points = tf.concat([avg_points, max_points], axis=-1)

        if mlp2 is None: mlp2 = []
        for i, num_out_channel in enumerate(mlp2):
            new_points = tf_util.conv2d(new_points,
                                        num_out_channel, [1, 1],
                                        padding='VALID',
                                        stride=[1, 1],
                                        bn=bn,
                                        is_training=is_training,
                                        scope='conv_post_%d' % (i),
                                        bn_decay=bn_decay)
        new_points = tf.squeeze(new_points,
                                [2])  # (batch_size, npoints, mlp2[-1])
        return new_xyz, new_points, idx
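
# A stacking sketch (assumptions: channel widths, radii and sample counts are
# illustrative SSG-style settings, not values prescribed by this listing):
# three SA modules ending in a group_all layer that yields a global feature.
xyz_pl = tf.placeholder(tf.float32, shape=(16, 2048, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())

l1_xyz, l1_points, l1_idx = pointnet_sa_module(
    xyz_pl, None, npoint=512, radius=0.2, nsample=32, mlp=[64, 64, 128],
    mlp2=None, group_all=False, is_training=is_training_pl, bn_decay=None, scope='sa1')
l2_xyz, l2_points, l2_idx = pointnet_sa_module(
    l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128, 128, 256],
    mlp2=None, group_all=False, is_training=is_training_pl, bn_decay=None, scope='sa2')
l3_xyz, l3_points, l3_idx = pointnet_sa_module(
    l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256, 512, 1024],
    mlp2=None, group_all=True, is_training=is_training_pl, bn_decay=None, scope='sa3')
# l3_points: (16, 1, 1024) global feature for the whole cloud
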
# Example #11
def get_model_rbf0_gan(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1', reuse=tf.AUTO_REUSE) as sc:
        transform = input_transform_net_no_bn(point_cloud,
                                              is_training,
                                              bn_decay,
                                              K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    point_cloud_transformed = tf.expand_dims(point_cloud_transformed, 3)

    #centroids = tf.constant(np.random.randn(1, 1, 3, 1024), dtype=tf.float32)
    centroids = tf.get_variable('centroids', [1, 1, 3, 1024],
                                initializer=tf.constant_initializer(
                                    0.2 * np.random.randn(1, 1, 3, 1024)),
                                dtype=tf.float32)

    feature = tf.tile(point_cloud_transformed, [1, 1, 1, 1024])  # replicate points across the 1024 centroids

    bias = tf.tile(centroids, [batch_size, num_point, 1, 1])     # replicate centroids across points

    net = tf.subtract(feature, bias)
    net = tf.norm(net, axis=2, keep_dims=True)
    net = tf.exp(-net)

    # Symmetric function: max pooling
    features = tf_util.max_pool2d(net, [num_point, 1],
                                  padding='VALID',
                                  scope='maxpool')

    net = tf.reshape(features, [batch_size, -1])
    #net = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training,
    #                              scope='fc0', bn_decay=bn_decay)
    #net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
    #                      scope='dp1')
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=False,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=False,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 41, activation_fn=None, scope='fc3')

    return net, end_points, features, centroids
# Example #12
def get_model_rbf_transform(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1', reuse=tf.AUTO_REUSE) as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    c1 = 64
    #centroids = tf.constant(np.random.randn(1, 1, 3, 1024), dtype=tf.float32)
    centroids = tf.get_variable('centroids', [1, 1, 3, c1],
                                initializer=tf.constant_initializer(
                                    0.5 * np.random.randn(1, 1, 3, c1)),
                                dtype=tf.float32)
    net = tf.subtract(
        tf.tile(tf.expand_dims(point_cloud_transformed, 3), [1, 1, 1, c1]),
        tf.tile(centroids, [batch_size, num_point, 1, 1]))
    net = tf.norm(net, axis=2, keep_dims=True)
    net = tf.exp(-net)

    with tf.variable_scope('transform_net2', reuse=tf.AUTO_REUSE) as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    c2 = 256
    #centroids = tf.constant(np.random.randn(1, 1, 3, 1024), dtype=tf.float32)
    centroids2 = tf.get_variable('centroids2', [1, 1, c2, 64],
                                 initializer=tf.constant_initializer(
                                     0.5 * np.random.randn(1, 1, c2, 64)),
                                 dtype=tf.float32)
    net = tf.subtract(tf.tile(net_transformed, [1, 1, c2, 1]),
                      tf.tile(centroids2, [batch_size, num_point, 1, 1]))
    net = tf.norm(net, axis=3, keep_dims=True)
    net = tf.exp(-net)

    # Symmetric function: max pooling
    features = tf_util.max_pool2d(net, [num_point, 1],
                                  padding='VALID',
                                  scope='maxpool')

    net = tf.reshape(features, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points, features, centroids
# Example #13
def get_model_rbf3(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1', reuse=tf.AUTO_REUSE) as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    point_cloud_transformed = tf.expand_dims(point_cloud_transformed, 3)

    #centroids = tf.constant(np.random.randn(1, 1, 3, 1024), dtype=tf.float32)
    c1 = 512
    c2 = 8
    centroids = tf.get_variable('centroids', [1, 1, 3, c1],
                                initializer=tf.constant_initializer(
                                    np.random.randn(1, 1, 3, c1)),
                                dtype=tf.float32)

    sub_centroids = tf.get_variable('sub_centroids', [1, 1, 3, c2],
                                    initializer=tf.constant_initializer(
                                        0.05 * np.random.randn(1, 1, 3, c2)),
                                    dtype=tf.float32)
    #sub_centroids = tf.constant(0.05*np.random.randn(1, 1, 3, c2), dtype=tf.float32)

    sub_bias = tf.add(
        tf.tile(tf.expand_dims(sub_centroids, 4), [1, 1, 1, 1, c1]),
        tf.tile(tf.expand_dims(centroids, 3), [1, 1, 1, c2, 1]))
    sub_bias = tf.tile(sub_bias, [batch_size, num_point, 1, 1, 1])
    sub_feature = tf.tile(tf.expand_dims(point_cloud_transformed, 4),
                          [1, 1, 1, c2, c1])
    sub_net = tf.exp(-tf.square(
        tf.norm(
            tf.subtract(sub_feature, sub_bias), ord=3, axis=2, keep_dims=True))
                     )
    sub_net = tf.squeeze(sub_net)
    sub_net = tf.transpose(sub_net, perm=[0, 1, 3, 2])
    sub_net = tf_util.max_pool2d(sub_net, [num_point, 1],
                                 stride=[1, 1],
                                 padding='VALID',
                                 scope='maxpool')
    #sub_net = tf_util.conv2d(sub_net, 16, [1,1],
    #                                 padding='VALID', stride=[1,1],
    #                                 bn=True, is_training=is_training,
    #                                 scope='mini_conv1', bn_decay=bn_decay)
    sub_net = tf_util.conv2d(sub_net,
                             2, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='mini_conv2',
                             bn_decay=bn_decay)
    sub_net = tf.squeeze(sub_net)

    feature = tf.tile(point_cloud_transformed, [1, 1, 1, c1])
    bias = tf.tile(centroids, [batch_size, num_point, 1, 1])
    net = tf.subtract(feature, bias)
    #net = tf.exp(net)
    net = tf.norm(net, ord=3, axis=2, keep_dims=True)
    net = tf.exp(-tf.square(net))
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')
    net = tf.expand_dims(tf.squeeze(net), 2)

    features = tf.concat([net, sub_net], axis=2)

    # Symmetric function: max pooling
    #features = tf_util.max_pool2d(net, [num_point,1],
    #                         padding='VALID', scope='maxpool')

    net = tf.reshape(features, [batch_size, -1])
    #net = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training,
    #                              scope='fc0', bn_decay=bn_decay)
    #net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
    #                      scope='dp1')
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points, features, centroids
# Example #14
def get_encoder_network(images, is_training):
    """ Convolutional image encoder.
    Args:
        images: tensor of size BxHxWx1 where B is batch size, H and W are image dimensions
    Returns:
        net: a Python dictionary containing all encoder tensors
    """
    net = {}
    net['conv1'] = tf_util.conv2d(images,
                                  32, [3, 3],
                                  activation_fn=leak_relu,
                                  scope='encoder/conv1',
                                  bn=True,
                                  is_training=is_training)  # H * W
    net['conv2'] = tf_util.conv2d(net['conv1'],
                                  32, [3, 3],
                                  activation_fn=leak_relu,
                                  scope='encoder/conv2',
                                  bn=True,
                                  is_training=is_training)  # H * W
    net['pool3'] = tf_util.max_pool2d(net['conv2'], [2, 2],
                                      scope='encoder/pool3',
                                      stride=[2, 2],
                                      padding='VALID')  # H/2 * W/2
    net['conv4'] = tf_util.conv2d(net['pool3'],
                                  32, [3, 3],
                                  activation_fn=leak_relu,
                                  scope='encoder/conv4',
                                  bn=True,
                                  is_training=is_training)  # H/2 * W/2
    net['conv5'] = tf_util.conv2d(net['conv4'],
                                  32, [3, 3],
                                  activation_fn=leak_relu,
                                  scope='encoder/conv5',
                                  bn=True,
                                  is_training=is_training)  # H/2 * W/2
    net['pool6'] = tf_util.max_pool2d(net['conv5'], [2, 2],
                                      scope='encoder/pool6',
                                      stride=[2, 2],
                                      padding='VALID')  # H/4 * W/4
    net['conv7'] = tf_util.conv2d(net['pool6'],
                                  64, [3, 3],
                                  activation_fn=leak_relu,
                                  scope='encoder/conv7',
                                  bn=True,
                                  is_training=is_training)  # H/4 * W/4
    net['conv8'] = tf_util.conv2d(net['conv7'],
                                  64, [3, 3],
                                  activation_fn=leak_relu,
                                  scope='encoder/conv8',
                                  bn=True,
                                  is_training=is_training)  # H/4 * W/4
    net['conv9'] = tf_util.conv2d(net['conv8'],
                                  64, [3, 3],
                                  activation_fn=leak_relu,
                                  scope='encoder/conv9',
                                  bn=True,
                                  is_training=is_training)  # H/4 * W/4
    net['pool10'] = tf_util.max_pool2d(net['conv9'], [2, 2],
                                       scope='encoder/pool10',
                                       stride=[2, 2],
                                       padding='VALID')  # H/8 * W/8
    net['conv11'] = tf_util.conv2d(net['pool10'],
                                   64, [3, 3],
                                   activation_fn=leak_relu,
                                   scope='encoder/conv11',
                                   bn=True,
                                   is_training=is_training)  # H/8 * W/8
    net['conv12'] = tf_util.conv2d(net['conv11'],
                                   64, [3, 3],
                                   activation_fn=leak_relu,
                                   scope='encoder/conv12',
                                   bn=True,
                                   is_training=is_training)  # H/8 * W/8
    net['conv13'] = tf_util.conv2d(net['conv12'],
                                   64, [3, 3],
                                   activation_fn=leak_relu,
                                   scope='encoder/conv13',
                                   bn=True,
                                   is_training=is_training)  # H/8 * W/8
    net['pool14'] = tf_util.max_pool2d(net['conv13'], [2, 2],
                                       scope='encoder/pool14',
                                       stride=[2, 2],
                                       padding='VALID')  # H/16 * W/16
    net['conv15'] = tf_util.conv2d(net['pool14'],
                                   128, [3, 3],
                                   activation_fn=leak_relu,
                                   scope='encoder/conv15',
                                   bn=True,
                                   is_training=is_training)  # H/16 * W/16
    net['conv16'] = tf_util.conv2d(net['conv15'],
                                   128, [3, 3],
                                   activation_fn=leak_relu,
                                   scope='encoder/conv16',
                                   bn=True,
                                   is_training=is_training)  # H/16 * W/16
    net['conv17'] = tf_util.conv2d(net['conv16'],
                                   128, [3, 3],
                                   activation_fn=leak_relu,
                                   scope='encoder/conv17',
                                   bn=True,
                                   is_training=is_training)  # H/16 * W/16
    net['pool18'] = tf_util.max_pool2d(net['conv17'], [2, 2],
                                       scope='encoder/pool18',
                                       stride=[2, 2],
                                       padding='VALID')  # H/32 * W/32
    net['conv19'] = tf_util.conv2d(net['pool18'],
                                   128, [3, 3],
                                   activation_fn=leak_relu,
                                   scope='encoder/conv19',
                                   bn=True,
                                   is_training=is_training)  # H/32 * W/32

    return net
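
# A usage sketch (assumptions: spatial size 128x128 and batch 8 are
# illustrative; the input must be single-channel as the docstring above states).
images_pl = tf.placeholder(tf.float32, shape=(8, 128, 128, 1))   # BxHxWx1
is_training_pl = tf.placeholder(tf.bool, shape=())
encoder = get_encoder_network(images_pl, is_training_pl)
bottleneck = encoder['conv19']                                   # B x H/32 x W/32 x 128
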
# Example #15
def get_model(point_cloud, is_training, autoencoder=False, bn_decay=None):
    """ Autoencoder for point clouds.
    Input:
        point_cloud: TF tensor BxNx3
        is_training: boolean
        bn_decay: float between 0 and 1
    Output:
        net: TF tensor BxNx3, reconstructed point clouds
        end_points: dict
    """
    batch_size = point_cloud.get_shape()[0].value
    num_pointclouds_per_query = point_cloud.get_shape()[1].value
    num_point = point_cloud.get_shape()[2].value
    point_cloud = tf.reshape(
        point_cloud, [batch_size * num_pointclouds_per_query, num_point, 3])

    end_points = {}

    input_image = tf.expand_dims(point_cloud, -1)

    # Encoder
    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    point_feat = tf_util.conv2d(net,
                                64, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                scope='conv3',
                                bn_decay=bn_decay)
    net = tf_util.conv2d(point_feat,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point, 1],
                                     padding='VALID',
                                     scope='maxpool')

    net = tf.reshape(global_feat, [batch_size * num_pointclouds_per_query, -1])
    feature = net

    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  bn_decay=bn_decay,
                                  scope='fc1')

    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  bn_decay=bn_decay,
                                  scope='fc2')

    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')

    net = tf.reshape(net, [batch_size, num_pointclouds_per_query, -1])

    # net: (batch_size, num_pointclouds_per_query, 256)
    return net, feature
# Example #16
    def get_model_w_ae(self, input_x, is_training, reuse=False, bn_decay=None):
        """
        Classification PointNet, input is BxNx3, output Bx40
        """
        batch_size = self.configuration.batch_size
        num_point = self.configuration.n_input[0]
        end_points = {}
        # with tf.variable_scope('Classifier', reuse=tf.AUTO_REUSE) as scope:

        with tf.variable_scope('transform_net1') as sc:
            transform = self.input_transform_net(input_x,
                                                 is_training,
                                                 bn_decay,
                                                 K=3)
        point_cloud_transformed = tf.matmul(input_x, transform)
        end_points['first'] = point_cloud_transformed

        input_image = tf.expand_dims(point_cloud_transformed, -1)

        net = tf_util.conv2d(input_image,
                             64, [1, 3],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='conv1',
                             bn_decay=bn_decay)
        end_points['second'] = net
        net = tf_util.conv2d(net,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='conv2',
                             bn_decay=bn_decay)

        with tf.variable_scope('transform_net2') as sc:
            transform = self.feature_transform_net(net,
                                                   is_training,
                                                   bn_decay,
                                                   K=64)
        end_points['transform'] = transform
        net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
        net_transformed = tf.expand_dims(net_transformed, [2])

        net = tf_util.conv2d(net_transformed,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='conv3',
                             bn_decay=bn_decay)
        net = tf_util.conv2d(net,
                             128, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='conv4',
                             bn_decay=bn_decay)
        net = tf_util.conv2d(net,
                             1024, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='conv5',
                             bn_decay=bn_decay)

        #print("before maxpool")
        #print(net.get_shape())
        end_points['pre_max'] = net
        # Symmetric function: max pooling
        net = tf_util.max_pool2d(net, [num_point, 1],
                                 padding='VALID',
                                 scope='maxpool')
        #print("after maxpool")
        #print(net.get_shape())
        net = tf.reshape(net, [batch_size, -1])
        end_points['post_max'] = net

        #print("after reshape")
        #print(net.get_shape())
        net = tf_util.fully_connected(net,
                                      512,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc1',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.7,
                              is_training=is_training,
                              scope='dp1')
        net = tf_util.fully_connected(net,
                                      256,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc2',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.7,
                              is_training=is_training,
                              scope='dp2')
        end_points['final'] = net
        net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

        #print(end_points['pre_max'].get_shape())
        return net, end_points
# Example #17
def get_model(input_tensor, is_training, bn_decay=None):
    """ Dynamic-graph (DGCNN-style) segmentation network, input is BxNxC, output BxNx16 """
    weight_decay = 0.0
    num_point = input_tensor.get_shape()[1].value

    k = 40  # number of nearest neighbours used to build the edge features

    # Edge features for the input transform net
    adj_matrix = tf_util.pairwise_distance(input_tensor)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(input_tensor, nn_idx=nn_idx, k=k)

    # Transform Net
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature, is_training, bn_decay,
                                        K=input_tensor.get_shape()[2].value, is_dist=True)
    input_tensor_transformed = tf.matmul(input_tensor, transform)
    adj_matrix = tf_util.pairwise_distance(input_tensor_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(input_tensor_transformed, nn_idx=nn_idx, k=k)

    out1_1 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='one/adj_conv1', bn_decay=bn_decay, is_dist=True)

    
    out1_2 = tf_util.conv2d(out1_1, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='one/adj_conv2', bn_decay=bn_decay, is_dist=True)

        
    out1_3 = tf_util.conv2d(out1_2, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='one/adj_conv3', bn_decay=bn_decay, is_dist=True)

    net_1 = tf.reduce_max(out1_3, axis=-2, keepdims=True)



    adj = tf_util.pairwise_distance(net_1)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)

    out2_1 = tf_util.conv2d(edge_feature, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='two/adj_conv1', bn_decay=bn_decay, is_dist=True)

    out2_2 = tf_util.conv2d(out2_1, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='two/adj_conv2', bn_decay=bn_decay, is_dist=True)

    out2_3 = tf_util.conv2d(out2_2, 64, [1,1],
                            padding='VALID', stride=[1,1],
                            bn=True, is_training=is_training, weight_decay=weight_decay,
                            scope='two/adj_conv3', bn_decay=bn_decay, is_dist=True)
                            
    net_2 = tf.reduce_max(out2_3, axis=-2, keepdims=True)

      

    adj = tf_util.pairwise_distance(net_2)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)

    out3_1 = tf_util.conv2d(edge_feature, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='three/adj_conv1', bn_decay=bn_decay, is_dist=True)


    out3_2 = tf_util.conv2d(out3_1, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='three/adj_conv2', bn_decay=bn_decay, is_dist=True)


    net_3 = tf.reduce_max(out3_2, axis=-2, keepdims=True)



    out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1), 1024, [1, 1], 
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training,
                        scope='adj_conv7', bn_decay=bn_decay, is_dist=True)

    out_max = tf_util.max_pool2d(out7, [num_point, 1], padding='VALID', scope='maxpool')


    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3, values=[expand, 
                                        net_1,
                                        net_2,
                                        net_3])

    # CONV 
    net = tf_util.conv2d(concat, 512, [1,1], padding='VALID', stride=[1,1],
                bn=True, is_training=is_training, scope='seg/conv1', is_dist=True)
    # net = tf_util.conv2d(net, 256, [1,1], padding='VALID', stride=[1,1],
    #             bn=True, is_training=is_training, scope='seg/conv2', is_dist=True)
    # net = tf_util.conv2d(net, 128, [1,1], padding='VALID', stride=[1,1],
    #             bn=True, is_training=is_training, scope='seg/conv3', is_dist=True)
    # net = tf_util.conv2d(net, 64, [1,1], padding='VALID', stride=[1,1],
    #             bn=True, is_training=is_training, scope='seg/conv4', is_dist=True)
    # net = tf_util.conv2d(net, 32, [1,1], padding='VALID', stride=[1,1],
    #             bn=True, is_training=is_training, scope='seg/conv5', is_dist=True)    
    
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    
    net = tf_util.conv2d(net, 16, [1,1], padding='VALID', stride=[1,1],
                activation_fn=None, scope='seg/output', is_dist=True)


    net = tf.squeeze(net, [2])

    return net
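
# A minimal NumPy sketch of the k-NN / edge-feature step used in the model
# above, assuming the usual DGCNN semantics for tf_util.pairwise_distance,
# tf_util.knn and tf_util.get_edge_feature (those helpers are not defined in
# this file): the edge feature of point i w.r.t. neighbor j is
# concat(x_i, x_j - x_i). Names below are illustrative only.
import numpy as np

def knn_edge_features(points, k):
    """points: (N, C) array; returns (N, k, 2*C) edge features."""
    diff = points[:, None, :] - points[None, :, :]        # (N, N, C)
    dist2 = np.sum(diff ** 2, axis=-1)                    # pairwise squared distances
    nn_idx = np.argsort(dist2, axis=-1)[:, :k]            # k nearest (self included)
    neighbors = points[nn_idx]                            # (N, k, C)
    central = np.repeat(points[:, None, :], k, axis=1)    # (N, k, C)
    return np.concatenate([central, neighbors - central], axis=-1)

pts = np.random.randn(1024, 3).astype(np.float32)
print(knn_edge_features(pts, k=20).shape)                 # (1024, 20, 6)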
Example #18
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output BxNx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64 // 4, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64 // 4, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net,
                                          is_training,
                                          bn_decay,
                                          K=64 // 4)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    point_feat = tf.expand_dims(net_transformed, [2])
    print(point_feat)

    net = tf_util.conv2d(point_feat,
                         64 // 4, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128 // 4, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024 // 4, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point, 1],
                                     padding='VALID',
                                     scope='maxpool')
    print(global_feat)

    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    concat_feat = tf.concat([point_feat, global_feat_expand], 3)
    print(concat_feat)

    net = tf_util.conv2d(concat_feat,
                         512 // 4, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv6',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         256 // 4, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv7',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128 // 4, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv8',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128 // 4, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv9',
                         bn_decay=bn_decay)

    net = tf_util.conv2d(net,
                         3, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=None,
                         scope='conv10')
    # output channels; the original network hard-coded 50 classes here
    net = tf.squeeze(net, [2])  # BxNxC

    return net, end_points
Example #19
def get_model(pc_xyz, pc_features, is_training, num_class, 
              bn_decay=None, use_t_net=True, pc_cls=None, cls_num=1, **kwargs):
    """ ConvNet baseline, input is BxNx3 gray image """
    end_points = {}
    num_point = pc_xyz.shape[1]

    if use_t_net:
        with tf.variable_scope('transform_net1') as sc:
            transform = get_transform(pc_xyz, is_training, bn_decay)
        pc_xyz = tf.matmul(pc_xyz, transform)
    # B x N x 3 x 1
    pc_xyz = tf.expand_dims(pc_xyz, -1)

    # B x N x 1 x 1
    pc_features = tf.expand_dims(pc_features, -1)
    # B x N x 4 x 1
    pc_xyz = tf.concat(axis=2, values=[pc_xyz, pc_features])

    # block one [64, 128, 128]
    K = 4
    out1 = tf_util.conv2d(pc_xyz, 64, [1, K], 
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv1', bn_decay=bn_decay)
    out2 = tf_util.conv2d(out1, 128, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv2', bn_decay=bn_decay)
    out3 = tf_util.conv2d(out2, 128, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv3', bn_decay=bn_decay)

    if use_t_net:
        with tf.variable_scope('transform_net2') as sc:
            K = 128
            transform = get_transform_K(out3, is_training, bn_decay, K)
        end_points['transform'] = transform
        net_transformed = tf.matmul(tf.reshape(out3, [-1, num_point, 128]), transform)
        net_transformed = tf.expand_dims(net_transformed, [2])
    else:
        net_transformed = out3

    # block two [512, 2048]
    out4 = tf_util.conv2d(net_transformed, 512, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv4', bn_decay=bn_decay)
    out5 = tf_util.conv2d(out4, 2048, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv5', bn_decay=bn_decay)
    out_max = tf_util.max_pool2d(out5, [num_point, 1], padding='VALID', scope='maxpool')

    # segmentation network
    if pc_cls is not None:
        # pc_labels.shape (batch_size, cls_num)
        one_hot_label_expand = tf.reshape(pc_cls, [-1, 1, 1, cls_num])
        out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])

    out_max = tf.tile(out_max, [1, num_point, 1, 1])
    concat = tf.concat(axis=3, values=[out_max, out1, out2, out3, out4, out5])

    # block [256, 256, 128] -> 2
    net2 = tf_util.conv2d(concat, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
                        bn=True, is_training=is_training, scope='seg/conv1')
    net2 = tf_util.dropout(net2, keep_prob=0.8, is_training=is_training, scope='seg/dp1')
    net2 = tf_util.conv2d(net2, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
                        bn=True, is_training=is_training, scope='seg/conv2')
    net2 = tf_util.dropout(net2, keep_prob=0.8, is_training=is_training, scope='seg/dp2')
    net2 = tf_util.conv2d(net2, 128, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
                        bn=True, is_training=is_training, scope='seg/conv3')
    net2 = tf_util.conv2d(net2, num_class, [1,1], padding='VALID', stride=[1,1], activation_fn=None, 
                        bn=False, scope='seg/conv4')

    net2 = tf.reshape(net2, [-1, num_point, num_class])

    return net2, end_points
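
# NumPy sketch (illustrative shapes only) of how the optional per-cloud class
# label pc_cls is folded into the pooled feature above: it is reshaped to
# (B, 1, 1, cls_num), concatenated onto out_max, then tiled to every point.
import numpy as np

B, N, C, cls_num = 2, 2048, 2048, 16
out_max = np.random.randn(B, 1, 1, C).astype(np.float32)    # pooled global feature
labels = np.array([3, 7])                                    # object class per cloud
one_hot = np.eye(cls_num, dtype=np.float32)[labels]          # (B, cls_num)
one_hot = one_hot.reshape(B, 1, 1, cls_num)

global_feat = np.concatenate([out_max, one_hot], axis=3)     # (B, 1, 1, C + cls_num)
per_point = np.tile(global_feat, (1, N, 1, 1))               # (B, N, 1, C + cls_num)
print(per_point.shape)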
Example #20
def get_model_my_model(point_cloud, is_training, bn_decay=None):
    
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20
    
    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)
    
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature, is_training, bn_decay, K=3)
    
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    
    # Conv 1
    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud_transformed, nn_idx=nn_idx, k=k)
    
    net_local1 = tf_util.conv2d(edge_feature, 64, [1,1],
                     padding='VALID', stride=[1,1],
                     bn=True, is_training=is_training,
                     scope='dgcnn1', bn_decay=bn_decay)
    net_local1 = tf.reduce_max(net_local1, axis=-2, keep_dims=True)
    
    net_local1_intermediate = net_local1
    
    # Second conv of the residual block; use a distinct scope so its variables
    # do not collide with the 'dgcnn1' layer above.
    net_local1 = tf_util.conv2d(net_local1, 64, [1,1],
                     padding='VALID', stride=[1,1],
                     bn=True, is_training=is_training,
                     scope='dgcnn1_2', bn_decay=bn_decay, activation_fn=None)
    net_local1 = tf.reduce_max(net_local1, axis=-2, keep_dims=True)
    
    net_local1 += net_local1_intermediate
    net_local1 = tf.nn.relu(net_local1)
    
    #net1 = net_local1
    
    net_local_vector1 = tf_util.max_pool2d(net_local1, [num_point,1],
                         padding='VALID', scope='maxpool1')
    
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net_global1 = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    
    net_global_vector1 = tf_util.max_pool2d(net_global1, [num_point,1],
                           padding='VALID', scope='maxpool1')
    
    points_feat1_concat = tf.concat(axis=-1, values=[net_global_vector1, net_local_vector1])
    points_feat1_concat = tf.reduce_max(points_feat1_concat, axis=-2, keep_dims=True)
    
    # Conv 2
    adj_matrix = tf_util.pairwise_distance(points_feat1_concat)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(points_feat1_concat, nn_idx=nn_idx, k=k)
    
    net_local2 = tf_util.conv2d(edge_feature, 64, [1,1], padding='VALID', stride=[1,1],
                     bn=True, is_training=is_training, scope='dgcnn2', bn_decay=bn_decay)
    net_local2 = tf.reduce_max(net_local2, axis=-2, keep_dims=True)
    #net2 = net_local2
    
    net_local_vector2 = tf_util.max_pool2d(net_local2, [num_point,1],
                         padding='VALID', scope='maxpool2')
    

    net_global2 = tf_util.conv2d(points_feat1_concat, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    
    net_global_vector2 = tf_util.max_pool2d(net_global2, [num_point,1],
                           padding='VALID', scope='maxpool2')
    
    points_feat2_concat = tf.concat(axis=-1, values=[net_global_vector2, net_local_vector2])
    
    # Conv 3
    adj_matrix = tf_util.pairwise_distance(points_feat2_concat)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(points_feat2_concat, nn_idx=nn_idx, k=k)
    
    net_local3 = tf_util.conv2d(edge_feature, 64, [1,1],
                     padding='VALID', stride=[1,1],
                     bn=True, is_training=is_training,
                     scope='dgcnn3', bn_decay=bn_decay)
    net_local3 = tf.reduce_max(net_local3, axis=-2, keep_dims=True)
    #net3 = net_local3
    
    net_local_vector3 = tf_util.max_pool2d(net_local3, [num_point,1],
                         padding='VALID', scope='maxpool3')
    

    net_global3 = tf_util.conv2d(points_feat2_concat, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    
    net_global_vector3 = tf_util.max_pool2d(net_global3, [num_point,1],
                           padding='VALID', scope='maxpool3')
    
    points_feat3_concat = tf.concat(axis=-1, values=[net_global_vector3, net_local_vector3])
    
    # Conv 4
    adj_matrix = tf_util.pairwise_distance(points_feat3_concat)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(points_feat3_concat, nn_idx=nn_idx, k=k)
    
    net_local4 = tf_util.conv2d(edge_feature, 128, [1,1],
                     padding='VALID', stride=[1,1],
                     bn=True, is_training=is_training,
                     scope='dgcnn4', bn_decay=bn_decay)
    net_local4 = tf.reduce_max(net_local4, axis=-2, keep_dims=True)
    #net4 = net_local4
    
    net_local_vector4 = tf_util.max_pool2d(net_local4, [num_point,1],
                         padding='VALID', scope='maxpool4')
    

    net_global4 = tf_util.conv2d(points_feat3_concat, 128, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    
    net_global_vector4 = tf_util.max_pool2d(net_global4, [num_point,1],
                           padding='VALID', scope='maxpool4')
    
    points_feat4_concat = tf.concat(axis=-1, values=[net_global_vector4, net_local_vector4])
    
    # Conv 5
    net_concat = tf_util.conv2d(tf.concat([points_feat1_concat, points_feat2_concat, points_feat3_concat, points_feat4_concat], axis=-1), 1024, [1,1],
                     padding='VALID', stride=[1,1],
                     bn=True, is_training=is_training,
                     scope='conv5', bn_decay=bn_decay)
    
    # Symmetry Aggregation
    net_agg = tf_util.max_pool2d(net_concat, [num_point,1],
                         padding='VALID', scope='maxpool_agg')
    
    net = tf.reshape(net_agg, [batch_size, -1])
    #net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
    #                              scope='fc1', bn_decay=bn_decay)
    #net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
    #                      scope='dp1')
    #net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
    #                              scope='fc2', bn_decay=bn_decay)
    #net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
    #                      scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
    
    return net, end_points
Example #21
def get_model_rbf(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1', reuse=tf.AUTO_REUSE) as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    point_cloud_transformed = tf.expand_dims(point_cloud_transformed, 3)

    c1 = 1024
    #centroids = tf.constant(np.random.randn(1, 1, 3, 1024), dtype=tf.float32)
    centroids = tf.get_variable('centroids', [1, 1, 3, c1],
                                initializer=tf.constant_initializer(
                                    0.5 * np.random.randn(1, 1, 3, c1)),
                                dtype=tf.float32)
    # per-centroid weights that reshape the multi-norm response
    # (initializer shape fixed to match the [1, 1, 4, c1] variable)
    weights = tf.get_variable(
        'weights',
        [1, 1, 4, c1],
        initializer=tf.constant_initializer(0.01 *
                                            np.random.randn(1, 1, 4, c1)),
    )

    feature = tf.tile(point_cloud_transformed, [1, 1, 1, c1])

    bias = tf.tile(centroids, [batch_size, num_point, 1, 1])

    net = tf.subtract(feature, bias)
    net = tf.exp(net)
    net = tf.exp(-tf.concat(
        [
            tf.norm(net, ord=0.5, axis=2, keep_dims=True),
            #tf.norm(net, ord=0.8, axis=2, keep_dims=True),
            tf.norm(net, ord=1, axis=2, keep_dims=True),
            #tf.norm(net, ord=1.5, axis=2, keep_dims=True),
            tf.norm(net, ord=2, axis=2, keep_dims=True),
            #tf.norm(net, ord=3, axis=2, keep_dims=True),
            #tf.norm(net, ord=4, axis=2, keep_dims=True),
            tf.norm(net, ord=np.inf, axis=2, keep_dims=True),
        ],
        axis=2))
    net = tf.multiply(net, tf.tile(weights, [batch_size, num_point, 1, 1]))
    #net = tf.exp(-net)
    # Symmetric function: max pooling
    features = tf_util.max_pool2d(net, [num_point, 1],
                                  padding='VALID',
                                  scope='maxpool')
    net = tf.transpose(features, perm=[0, 1, 3, 2])
    net = tf_util.conv2d(net,
                         3, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='mini_conv1',
                         bn_decay=bn_decay)
    net = tf.reshape(net, [batch_size, -1])
    #net = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training,
    #                              scope='fc0', bn_decay=bn_decay)
    #net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
    #                      scope='dp1')
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points, features, centroids
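
# NumPy sketch of the RBF-style descriptor built above: each point is compared
# to c1 centroids, the exponentiated offsets are summarized with several vector
# norms (ord 0.5, 1, 2, inf), re-weighted per centroid, and max-pooled over the
# points. Centroids and weights are random stand-ins for the learned variables.
import numpy as np

B, N, c1 = 2, 1024, 1024
points = np.random.randn(B, N, 3, 1).astype(np.float32)
centroids = 0.5 * np.random.randn(1, 1, 3, c1).astype(np.float32)
weights = 0.01 * np.random.randn(1, 1, 4, c1).astype(np.float32)

offset = np.exp(points - centroids)                      # (B, N, 3, c1) via broadcasting
norms = np.stack([
    np.sum(np.abs(offset) ** 0.5, axis=2) ** 2,          # ord = 0.5
    np.sum(np.abs(offset), axis=2),                      # ord = 1
    np.sqrt(np.sum(offset ** 2, axis=2)),                # ord = 2
    np.max(np.abs(offset), axis=2),                      # ord = inf
], axis=2)                                               # (B, N, 4, c1)
response = np.exp(-norms) * weights                      # per-centroid weighting
global_feat = response.max(axis=1)                       # max over points, (B, 4, c1)
print(global_feat.shape)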
Example #22
def get_model(point_cloud, is_training, bn_decay=None):
    """ ConvNet baseline, input is BxNx9 gray image """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)

    point_cloud_xyz = point_cloud[:, :, 6:]

    edge_feature, idx = tf_util.pointSIFT_KNN(0.15, point_cloud_xyz)

    _, xyz = tf_util.group(point_cloud_xyz, idx)

    feature, _ = tf_util.group(point_cloud, idx)


    net = tf_util.ngcn_chebyshev(inputs=feature, num_output_channels=64,
                              local_cord=xyz,
                              bn=False, is_training=is_training,
                              scope='spec_conv%d' % (0), bn_decay=bn_decay)

    net_1 = tf.reduce_max(net, axis=-2, keep_dims=True)
    
    feature, _ = tf_util.group(tf.squeeze(net_1, axis=-2), idx)

    net = tf_util.ngcn_chebyshev(inputs=feature, num_output_channels=64,
                              local_cord=xyz,
                              bn=False, is_training=is_training,
                              scope='spec_conv%d' % (1), bn_decay=bn_decay)

    net_2 = tf.reduce_max(net, axis=-2, keep_dims=True)

    feature, _ = tf_util.group(tf.squeeze(net_2, axis=-2), idx)

    net = tf_util.ngcn_chebyshev(inputs=feature, num_output_channels=128,
                              local_cord=xyz,
                              bn=False, is_training=is_training,
                              scope='spec_conv%d' % (2), bn_decay=bn_decay)

    net_3 = tf.reduce_max(net, axis=-2, keep_dims=True)

    out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1), 1024, [1, 1],
                          padding='VALID', stride=[1, 1],
                          bn=False, is_training=is_training,
                          scope='adj_conv7', bn_decay=bn_decay, is_dist=True)

    out_max = tf_util.max_pool2d(out7, [num_point, 1], padding='VALID', scope='maxpool')

    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3, values=[expand,
                                       net_1,
                                       net_2,
                                       net_3])

    # CONV
    net = tf_util.conv2d(concat, 512, [1, 1], padding='VALID', stride=[1, 1],
                         bn=False, is_training=is_training, scope='seg/conv1', is_dist=True)
    net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, 1],
                         bn=False, is_training=is_training, scope='seg/conv2', is_dist=True)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
    net = tf_util.conv2d(net, 13, [1, 1], padding='VALID', stride=[1, 1],
                         activation_fn=None, scope='seg/conv3', is_dist=True)
    net = tf.squeeze(net, [2])



    return net
Example #23
def get_model_elm(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1', reuse=tf.AUTO_REUSE) as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    random_weights = tf.constant(np.random.randn(3, 4096), dtype=tf.float32)
    # Broadcast the fixed random projection across the batch; tiling by
    # batch_size replaces the original hard-coded doubling up to 32 copies.
    random_weights1 = tf.tile(tf.expand_dims(random_weights, 0),
                              [batch_size, 1, 1])

    net = tf.matmul(point_cloud, random_weights1)
    net = tf.expand_dims(net, 2)

    # Symmetric function: max pooling
    features = tf_util.max_pool2d(net, [num_point, 1],
                                  padding='VALID',
                                  scope='maxpool')

    net = tf.reshape(features, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  1024,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc0',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points, features
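
# NumPy sketch of the ELM-style encoder above: the 3-D points go through a
# frozen random 3x4096 projection and a max pool over the point axis, so only
# the fully connected head is trained.
import numpy as np

B, N = 32, 1024
point_cloud = np.random.randn(B, N, 3).astype(np.float32)
random_weights = np.random.randn(3, 4096).astype(np.float32)   # never trained

projected = point_cloud @ random_weights    # (B, N, 4096)
features = projected.max(axis=1)            # symmetric max over points -> (B, 4096)
print(features.shape)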
Example #24
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
        batch_size, num_point, weight_decay, graphnum, featnum,  bn_decay=None):
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)

    k = 30

    adj = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3,
                                        is_dist=True)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -2)
    adj = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    out1 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv1',
                          bn_decay=bn_decay,
                          is_dist=True)

    out2 = tf_util.conv2d(out1,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv2',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_max_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)
    net_mean_1 = tf.reduce_mean(out2, axis=-2, keep_dims=True)

    out3 = tf_util.conv2d(tf.concat([net_max_1, net_mean_1], axis=-1),
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv3',
                          bn_decay=bn_decay,
                          is_dist=True)

    out3A, net_max_1A, net_mean_1A = tf_util.offset_deform(
        input_image,
        out3,
        scope="trans_conv0",
        num_neighbor=k,
        num_graph=graphnum[0],
        num_feat=featnum[0],
        weight_decay=weight_decay,
        is_training=is_training,
        bn_decay=bn_decay)

    out5, net_max_2, net_mean_2 = tf_util.offset_deform(
        input_image,
        out3A,
        scope="trans_conv1",
        num_neighbor=k,
        num_graph=graphnum[0],
        num_feat=featnum[0],
        weight_decay=weight_decay,
        is_training=is_training,
        bn_decay=bn_decay)

    out7, net_max_3, net_mean_3 = tf_util.offset_deform(
        input_image,
        out5,
        scope="trans_conv2",
        num_neighbor=k,
        num_graph=graphnum[1],
        num_feat=featnum[1],
        weight_decay=weight_decay,
        is_training=is_training,
        bn_decay=bn_decay)
    '''adj = tf_util.pairwise_distance(tf.squeeze(trans2, axis=-2))
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(tf.concat([out5,trans2], axis = -1), nn_idx=nn_idx, k=k)

    out6 = tf_util.conv2d(edge_feature, 64, [1,1],
                                             padding='VALID', stride=[1,1],
                                             bn=True, is_training=is_training, weight_decay=weight_decay,
                                             scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

    net_max_3 = tf.reduce_max(out6, axis=-2, keep_dims=True)
    net_mean_3 = tf.reduce_mean(out6, axis=-2, keep_dims=True)

    out7 = tf_util.conv2d(tf.concat([net_max_3, net_mean_3], axis=-1), 64, [1,1],
                                             padding='VALID', stride=[1,1],
                                             bn=True, is_training=is_training, weight_decay=weight_decay,
                                             scope='adj_conv7', bn_decay=bn_decay, is_dist=True)'''

    out8 = tf_util.conv2d(tf.concat([out3, out5, out7], axis=-1),
                          1024, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv13',
                          bn_decay=bn_decay,
                          is_dist=True)

    out_max = tf_util.max_pool2d(out8, [num_point, 1],
                                 padding='VALID',
                                 scope='maxpool')

    one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
    one_hot_label_expand = tf_util.conv2d(one_hot_label_expand,
                                          128, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=True,
                                          is_training=is_training,
                                          scope='one_hot_label_expand',
                                          bn_decay=bn_decay,
                                          is_dist=True)
    out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3,
                       values=[
                           expand, net_max_1, net_mean_1, out3, net_max_2,
                           net_mean_2, out5, net_max_3, net_mean_3, out7, out8
                       ])

    net2 = tf_util.conv2d(concat,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv1',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp1')
    net2 = tf_util.conv2d(net2,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv2',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp2')
    net2 = tf_util.conv2d(net2,
                          128, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv3',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.conv2d(net2,
                          part_num, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          activation_fn=None,
                          bn=False,
                          scope='seg/conv4',
                          weight_decay=weight_decay,
                          is_dist=True)

    net2 = tf.reshape(net2, [batch_size, num_point, part_num])

    return net2
Example #25
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1', reuse=tf.AUTO_REUSE) as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2', reuse=tf.AUTO_REUSE) as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
    features = tf_util.max_pool2d(net, [num_point, 1],
                                  padding='VALID',
                                  scope='maxpool')

    net = tf.reshape(features, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points, features
Example #26
def get_3d_box_estimation_v1_net(object_point_cloud, one_hot_vec, is_training,
                                 bn_decay, end_points):
    ''' 3D Box Estimation PointNet v1 network.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            point clouds in object coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)
            including box centers, heading bin class scores and residuals,
            and size cluster scores and residuals
    '''
    num_point = object_point_cloud.get_shape()[1].value
    net = tf.expand_dims(object_point_cloud, 2)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv-reg1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv-reg2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv-reg3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         512, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv-reg4',
                         bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool2')
    net = tf.squeeze(net, axis=[1, 2])
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net,
                                  512,
                                  scope='fc1',
                                  bn=True,
                                  is_training=is_training,
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  256,
                                  scope='fc2',
                                  bn=True,
                                  is_training=is_training,
                                  bn_decay=bn_decay)

    # The first 3 numbers: box center coordinates (cx,cy,cz),
    # the next NUM_HEADING_BIN*2:  heading bin class scores and bin residuals
    # next NUM_SIZE_CLUSTER*4: box cluster scores and residuals
    output = tf_util.fully_connected(net,
                                     3 + NUM_HEADING_BIN * 2 +
                                     NUM_SIZE_CLUSTER * 4,
                                     activation_fn=None,
                                     scope='fc3')
    return output, end_points
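
# NumPy sketch of how the flat box-regression vector above can be split back
# into its parts. NUM_HEADING_BIN = 12 and NUM_SIZE_CLUSTER = 8 are assumed
# values for illustration; they are defined elsewhere, not in this file.
import numpy as np

NUM_HEADING_BIN, NUM_SIZE_CLUSTER = 12, 8
B = 4
out_dim = 3 + NUM_HEADING_BIN * 2 + NUM_SIZE_CLUSTER * 4
output = np.random.randn(B, out_dim).astype(np.float32)

center = output[:, 0:3]                                                     # (B, 3)
heading_scores = output[:, 3:3 + NUM_HEADING_BIN]                           # (B, 12)
heading_residuals = output[:, 3 + NUM_HEADING_BIN:3 + NUM_HEADING_BIN * 2]  # (B, 12)
size_start = 3 + NUM_HEADING_BIN * 2
size_scores = output[:, size_start:size_start + NUM_SIZE_CLUSTER]           # (B, 8)
size_residuals = output[:, size_start + NUM_SIZE_CLUSTER:]                  # (B, 8 * 3)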
Example #27
def get_model(point_cloud, is_training, bn_decay=None):
    """ ConvNet baseline, input is BxNx9 gray image """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)

    k = 20
    weight_decay = 0  # added

    adj = tf_util.pairwise_distance(point_cloud[:, :, 6:])  # in: B*N*3, out: B*N*N
    nn_idx = tf_util.knn(adj, k=k)                           # out: B*N*k
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)  # out: B*N*k*18

    out1 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv1',
                          bn_decay=bn_decay,
                          is_dist=True)

    out2 = tf_util.conv2d(out1,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv2',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)

    adj = tf_util.pairwise_distance(net_1)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)

    out3 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv3',
                          bn_decay=bn_decay,
                          is_dist=True)

    out4 = tf_util.conv2d(out3,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv4',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)

    adj = tf_util.pairwise_distance(net_2)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)

    out5 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv5',
                          bn_decay=bn_decay,
                          is_dist=True)

    # out6 = tf_util.conv2d(out5, 64, [1,1],
    #                      padding='VALID', stride=[1,1],
    #                      bn=True, is_training=is_training, weight_decay=weight_decay,
    #                      scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

    net_3 = tf.reduce_max(out5, axis=-2, keep_dims=True)

    out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1),
                          1024, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv7',
                          bn_decay=bn_decay,
                          is_dist=True)

    out_max = tf_util.max_pool2d(out7, [num_point, 1],
                                 padding='VALID',
                                 scope='maxpool')

    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3, values=[expand, net_1, net_2, net_3])

    # CONV
    net = tf_util.conv2d(concat,
                         512, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='seg/conv1',
                         is_dist=True)
    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='seg/conv2',
                         is_dist=True)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.conv2d(net,
                         13, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=None,
                         scope='seg/conv3',
                         is_dist=True)
    net = tf.squeeze(net, [2])

    return net
Example #28
def get_instance_seg_v1_net(point_cloud, one_hot_vec, is_training, bn_decay,
                            end_points):
    ''' 3D instance segmentation PointNet v1 network.
    Input:
        point_cloud: TF tensor in shape (B,N,4)
            frustum point clouds with XYZ and intensity in point channels
            XYZs are in frustum coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
        is_training: TF boolean scalar
        bn_decay: TF float scalar
        end_points: dict
    Output:
        logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
        end_points: dict
    '''
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    net = tf.expand_dims(point_cloud, 2)

    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    point_feat = tf_util.conv2d(net,
                                64, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                scope='conv3',
                                bn_decay=bn_decay)
    net = tf_util.conv2d(point_feat,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point, 1],
                                     padding='VALID',
                                     scope='maxpool')

    global_feat = tf.concat(
        [global_feat,
         tf.expand_dims(tf.expand_dims(one_hot_vec, 1), 1)],
        axis=3)
    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])

    net = tf_util.conv2d(concat_feat,
                         512, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv6',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv7',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv8',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv9',
                         bn_decay=bn_decay)
    net = tf_util.dropout(net, is_training, 'dp1', keep_prob=0.5)

    logits = tf_util.conv2d(net,
                            2, [1, 1],
                            padding='VALID',
                            stride=[1, 1],
                            activation_fn=None,
                            scope='conv10')
    logits = tf.squeeze(logits, [2])  # BxNxC
    return logits, end_points
Example #29
def input_transform_net(point_cloud, is_training, bn_decay=None, K=3):
    """ Input (XYZ) Transform Net, input is BxNx3 gray image
        Return:
            Transformation matrix of size 3xK """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    input_image = tf.expand_dims(point_cloud, -1)
    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv3',
                         bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='tmaxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc1',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc2',
                                  bn_decay=bn_decay)

    with tf.variable_scope('transform_XYZ') as sc:
        assert (K == 3)
        weights = tf.get_variable('weights', [256, 3 * K],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [3 * K],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant([1, 0, 0, 0, 1, 0, 0, 0, 1], dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    transform = tf.reshape(transform, [batch_size, 3, K])
    return transform
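
# NumPy sketch of why this transform net starts as a no-op: with the weight
# matrix initialized to zero and the bias initialized to the flattened 3x3
# identity, the first forward pass predicts the identity transform no matter
# what the pooled feature vector contains.
import numpy as np

net = np.random.randn(4, 256).astype(np.float32)    # pooled global feature
weights = np.zeros((256, 9), dtype=np.float32)
biases = np.array([1, 0, 0, 0, 1, 0, 0, 0, 1], dtype=np.float32)

transform = (net @ weights + biases).reshape(4, 3, 3)
print(np.allclose(transform, np.eye(3)))             # True at initialization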
Example #30
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    input_image = tf.expand_dims(point_cloud, -1)

    # Point functions (MLP implemented as conv2d)
    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
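
# NumPy sketch of the symmetric-function argument behind the max pooling above:
# because the max is taken over the point axis, any permutation of the input
# points produces exactly the same global feature.
import numpy as np

N, C = 1024, 64
per_point_features = np.random.randn(N, C).astype(np.float32)
perm = np.random.permutation(N)

print(np.array_equal(per_point_features.max(axis=0),
                     per_point_features[perm].max(axis=0)))  # True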
Example #31
def input_transform_net(edge_feature, is_training, bn_decay=None, K=3):
    """ Input (XYZ) Transform Net, input is BxNx3 gray image
      Return:
        Transformation matrix of size 3xK """
    batch_size = edge_feature.get_shape()[0].value
    num_point = edge_feature.get_shape()[1].value

    # input_image = tf.expand_dims(point_cloud, -1)
    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv2',
                         bn_decay=bn_decay)

    net = tf.reduce_max(net, axis=-2, keep_dims=True)

    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv3',
                         bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='tmaxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc1',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc2',
                                  bn_decay=bn_decay)

    with tf.variable_scope('transform_XYZ') as sc:
        # assert(K==3)
        with tf.device('/cpu:0'):
            weights = tf.get_variable('weights', [256, K * K],
                                      initializer=tf.constant_initializer(0.0),
                                      dtype=tf.float32)
            biases = tf.get_variable('biases', [K * K],
                                     initializer=tf.constant_initializer(0.0),
                                     dtype=tf.float32)
        biases += tf.constant(np.eye(K).flatten(), dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    transform = tf.reshape(transform, [batch_size, K, K])
    return transform
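# Hedged sketch (illustrative, not part of the original code) of how the KxK
# matrix returned above is normally consumed: the raw cloud is aligned with a
# batched matmul before the per-point MLPs, as in the PointNet/DGCNN pipelines.
def apply_input_transform(point_cloud, transform):
    """point_cloud: BxNx3, transform: Bx3x3 -> aligned BxNx3 cloud."""
    return tf.matmul(point_cloud, transform)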
Example #32
def get_model_dg(point_cloud, is_training, bn_decay=None, K=4):
    """ Input (XYZ) Transform Net, input is BxNx3 gray image
        Return:
            Transformation matrix of size 3xK """

    k = 20

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    #input_image = tf.expand_dims(point_cloud, -1)
    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv2',
                         bn_decay=bn_decay)

    net = tf.reduce_max(net, axis=-2, keep_dims=True)

    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv3',
                         bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='tmaxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc1',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc2',
                                  bn_decay=bn_decay)

    with tf.variable_scope('transform_XYZ') as sc:
        # assert(K==3)
        weights = tf.get_variable('weights', [256, 4],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [4],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant([0, 0, 0, 1], dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    transform = tf.reshape(transform, [batch_size, 4])
    return transform
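# The helpers tf_util.pairwise_distance / tf_util.knn used above are assumed to
# compute squared Euclidean distances and k-nearest-neighbour indices; the plain
# TF sketch below illustrates that assumption and is not the original utility code.
def pairwise_sq_dist(point_cloud):
    """point_cloud: BxNxC -> BxNxN squared distances ||x_i - x_j||^2."""
    inner = -2 * tf.matmul(point_cloud, point_cloud, transpose_b=True)
    sq = tf.reduce_sum(tf.square(point_cloud), axis=-1, keep_dims=True)   # BxNx1
    return sq + inner + tf.transpose(sq, perm=[0, 2, 1])

def knn_indices(sq_dist, k=20):
    """sq_dist: BxNxN -> BxNxk indices of the k nearest neighbours."""
    _, idx = tf.nn.top_k(-sq_dist, k=k)   # negate so top_k picks the smallest distances
    return idx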
Example #33
def get_model(point_cloud, is_training, bn_decay=None, K=4):
    """ Input (XYZ) Transform Net, input is BxNx3 gray image
        Return:
            Transformation matrix of size 3xK """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    #num_point = tf.shape(point_cloud)[1]

    input_image = tf.expand_dims(point_cloud, -1)
    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv3',
                         bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='tmaxpool')
    #net = tf.reduce_max(net, axis=[2])

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc1',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc2',
                                  bn_decay=bn_decay)

    with tf.variable_scope('transform_XYZ') as sc:
        # assert(K==3)
        weights = tf.get_variable('weights', [256, K],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [4],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant([0, 0, 0, 1], dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    transform = tf.reshape(transform, [batch_size, 4])

    transform = tf.nn.l2_normalize(transform, dim=1)

    return transform
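# The L2-normalised Bx4 output above behaves like a unit quaternion (the
# [0, 0, 0, 1] bias is the identity). How it is consumed downstream is not shown
# here; the conversion below is a standard quaternion-to-rotation sketch under
# the assumption of an (x, y, z, w) layout, not code from the original model.
def quaternion_to_rotation(q):
    """q: Bx4 unit quaternions (x, y, z, w) -> Bx3x3 rotation matrices."""
    x, y, z, w = tf.unstack(q, axis=1)
    row0 = tf.stack([1 - 2 * (y * y + z * z), 2 * (x * y - z * w), 2 * (x * z + y * w)], axis=1)
    row1 = tf.stack([2 * (x * y + z * w), 1 - 2 * (x * x + z * z), 2 * (y * z - x * w)], axis=1)
    row2 = tf.stack([2 * (x * z - y * w), 2 * (y * z + x * w), 1 - 2 * (x * x + y * y)], axis=1)
    return tf.stack([row0, row1, row2], axis=1)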
Example #34
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
        #print(transform.shape)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)
    
    # Find, per batch, the indices of the points with the largest and smallest coordinate sums
    input_image_add = tf.reduce_sum(input_image, 2, keep_dims=True)   # BxNx1x1
    max_idx = tf.argmax(input_image_add, axis=1)
    min_idx = tf.argmin(input_image_add, axis=1)


    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    #print(net.shape)                     
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    #print(net.shape)                     
    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    #print(net.shape)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
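# The feature transform kept in end_points['transform'] (64x64 here) is usually
# regularised towards orthogonality, as in the PointNet paper: ||I - A A^T||_F^2.
# A minimal sketch of that term follows; how it is weighted against the
# classification loss is left to the caller and is an assumption here.
def transform_regularizer(transform):
    """transform: BxKxK -> scalar 0.5 * sum of squared entries of (A A^T - I)."""
    K = transform.get_shape()[1].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    return tf.nn.l2_loss(mat_diff)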
Example #35
def get_model(point_cloud, is_training, bn_decay=None):
    """ Autoencoder for point clouds.
    Input:
        point_cloud: TF tensor BxNx3
        is_training: boolean
        bn_decay: float between 0 and 1
    Output:
        net: TF tensor BxNx3, reconstructed point clouds
        end_points: dict
    """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    assert (num_point == 2048)
    point_dim = point_cloud.get_shape()[2].value
    end_points = {}

    input_image = tf.expand_dims(point_cloud, -1)

    # Encoder
    net = tf_util.conv2d(input_image,
                         64, [1, point_dim],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    point_feat = tf_util.conv2d(net,
                                64, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                scope='conv3',
                                bn_decay=bn_decay)
    net = tf_util.conv2d(point_feat,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point, 1],
                                     padding='VALID',
                                     scope='maxpool')

    net = tf.reshape(global_feat, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc00',
                                  bn_decay=bn_decay)
    embedding = tf.reshape(net, [batch_size, -1])
    end_points['embedding'] = embedding

    # FC Decoder
    net = tf_util.fully_connected(embedding,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  1024 * 3,
                                  activation_fn=None,
                                  scope='fc3')
    pc_fc = tf.reshape(net, (batch_size, -1, 3))

    # UPCONV Decoder
    net = tf.reshape(embedding, [batch_size, 1, 1, -1])
    net = tf_util.conv2d_transpose(net,
                                   512,
                                   kernel_size=[2, 2],
                                   stride=[1, 1],
                                   padding='VALID',
                                   scope='upconv1',
                                   bn=True,
                                   bn_decay=bn_decay,
                                   is_training=is_training)
    net = tf_util.conv2d_transpose(net,
                                   256,
                                   kernel_size=[3, 3],
                                   stride=[1, 1],
                                   padding='VALID',
                                   scope='upconv2',
                                   bn=True,
                                   bn_decay=bn_decay,
                                   is_training=is_training)
    net = tf_util.conv2d_transpose(net,
                                   256,
                                   kernel_size=[4, 4],
                                   stride=[2, 2],
                                   padding='VALID',
                                   scope='upconv3',
                                   bn=True,
                                   bn_decay=bn_decay,
                                   is_training=is_training)
    net = tf_util.conv2d_transpose(net,
                                   128,
                                   kernel_size=[5, 5],
                                   stride=[3, 3],
                                   padding='VALID',
                                   scope='upconv4',
                                   bn=True,
                                   bn_decay=bn_decay,
                                   is_training=is_training)
    net = tf_util.conv2d_transpose(net,
                                   3,
                                   kernel_size=[1, 1],
                                   stride=[1, 1],
                                   padding='VALID',
                                   scope='upconv5',
                                   activation_fn=None)
    end_points['xyzmap'] = net
    pc_upconv = tf.reshape(net, [batch_size, -1, 3])

    # Set union
    net = tf.concat(values=[pc_fc, pc_upconv], axis=1)

    return net, end_points
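# A worked size check (not part of the original code): with VALID padding a
# transposed conv grows the spatial size as out = (in - 1) * stride + kernel,
# so the 1x1 embedding becomes 2 -> 4 -> 10 -> 32 through upconv1..upconv4,
# i.e. 32 * 32 = 1024 upconv points; together with the 1024 * 3 FC-decoder
# points this matches the assert num_point == 2048 above.
def deconv_out(size, kernel, stride):
    return (size - 1) * stride + kernel

side = 1
for kernel, stride in [(2, 1), (3, 1), (4, 2), (5, 3)]:
    side = deconv_out(side, kernel, stride)
assert side * side == 1024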
Example #36
def get_model_point_net(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    input_image = tf.expand_dims(point_cloud, -1)

    # Point functions (MLP implemented as conv2d)
    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)

    with tf.variable_scope('transform_XYZ') as sc:
        # assert(K==3)
        weights = tf.get_variable('weights', [256, 4],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [4],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant([0, 0, 0, 1], dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    transform = tf.reshape(transform, [batch_size, 4])

    # net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
    #                       scope='dp1')
    # net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return transform  #, end_points
def get_model(point_cloud, input_label, is_training, cat_num, part_num,
              batch_size, num_point, weight_decay, bn_decay=None):
    """ ConvNet baseline, input is BxNx3 gray image """
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        K = 3
        transform = get_transform(point_cloud, is_training, bn_decay, K = 3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)

    input_image = tf.expand_dims(point_cloud_transformed, -1)
    out1 = tf_util.conv2d(input_image, 64, [1,K], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv1', bn_decay=bn_decay)
    out2 = tf_util.conv2d(out1, 128, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv2', bn_decay=bn_decay)
    out3 = tf_util.conv2d(out2, 128, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv3', bn_decay=bn_decay)


    with tf.variable_scope('transform_net2') as sc:
        K = 128
        transform = get_transform_K(out3, is_training, bn_decay, K)

    end_points['transform'] = transform

    squeezed_out3 = tf.reshape(out3, [batch_size, num_point, 128])
    net_transformed = tf.matmul(squeezed_out3, transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    out4 = tf_util.conv2d(net_transformed, 512, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv4', bn_decay=bn_decay)
    out5 = tf_util.conv2d(out4, 2048, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv5', bn_decay=bn_decay)
    out_max = tf_util.max_pool2d(out5, [num_point,1], padding='VALID', scope='maxpool')

    # classification network
    net = tf.reshape(out_max, [batch_size, -1])
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='cla/fc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='cla/fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='cla/dp1')
    net = tf_util.fully_connected(net, cat_num, activation_fn=None, scope='cla/fc3')

    # segmentation network
    one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
    out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])

    expand = tf.tile(out_max, [1, num_point, 1, 1])
    concat = tf.concat(axis=3, values=[expand, out1, out2, out3, out4, out5])

    net2 = tf_util.conv2d(concat, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
                        bn=True, is_training=is_training, scope='seg/conv1', weight_decay=weight_decay)
    net2 = tf_util.dropout(net2, keep_prob=0.8, is_training=is_training, scope='seg/dp1')
    net2 = tf_util.conv2d(net2, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
                        bn=True, is_training=is_training, scope='seg/conv2', weight_decay=weight_decay)
    net2 = tf_util.dropout(net2, keep_prob=0.8, is_training=is_training, scope='seg/dp2')
    net2 = tf_util.conv2d(net2, 128, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
                        bn=True, is_training=is_training, scope='seg/conv3', weight_decay=weight_decay)
    net2 = tf_util.conv2d(net2, part_num, [1,1], padding='VALID', stride=[1,1], activation_fn=None, 
                        bn=False, scope='seg/conv4', weight_decay=weight_decay)

    net2 = tf.reshape(net2, [batch_size, num_point, part_num])

    return net, net2, end_points
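# A hedged loss sketch for the two heads above; the original get_loss is not
# included in this snippet and the 1:1 weighting below is an assumption. The
# Bxcat_num classification logits and BxNxpart_num segmentation logits each
# take a sparse softmax cross-entropy against integer labels.
def get_cls_seg_loss(cls_logits, cls_label, seg_logits, seg_label):
    """cls_label: B int category ids, seg_label: BxN int part ids."""
    cls_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=cls_logits, labels=cls_label))
    seg_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_logits, labels=seg_label))
    return cls_loss + seg_loss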
Example #38
def get_model(point_cloud, is_training, bn_decay=None):
    """ Autoencoder for point clouds.
    Input:
        point_cloud: TF tensor BxNxC
        is_training: boolean
        bn_decay: float between 0 and 1
    Output:
        net: TF tensor BxNxC, reconstructed point clouds
        end_points: dict
    """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    point_dim = point_cloud.get_shape()[2].value
    end_points = {}

    input_image = tf.expand_dims(point_cloud, -1)

    # Encoder
    net = tf_util.conv2d(input_image,
                         64, [1, point_dim],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    point_feat = tf_util.conv2d(net,
                                64, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                scope='conv3',
                                bn_decay=bn_decay)
    net = tf_util.conv2d(point_feat,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point, 1],
                                     padding='VALID',
                                     scope='maxpool')

    net = tf.reshape(global_feat, [batch_size, -1])
    end_points['embedding'] = net

    # FC Decoder
    net = tf_util.fully_connected(net,
                                  1024,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  1024,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  num_point * 3,
                                  activation_fn=None,
                                  scope='fc3')
    net = tf.reshape(net, (batch_size, num_point, 3))

    return net, end_points
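# Point-cloud autoencoders like the one above are typically trained with a
# Chamfer-style reconstruction loss. The original repositories usually rely on
# a compiled nn_distance op; the dense O(N*M) plain-TF version below is only an
# illustrative sketch of the same quantity.
def chamfer_loss(pred, gt):
    """pred: BxNx3 reconstruction, gt: BxMx3 target -> scalar symmetric Chamfer."""
    diff = tf.expand_dims(pred, 2) - tf.expand_dims(gt, 1)   # BxNxMx3
    sq_dist = tf.reduce_sum(tf.square(diff), axis=-1)        # BxNxM
    pred_to_gt = tf.reduce_min(sq_dist, axis=2)              # BxN
    gt_to_pred = tf.reduce_min(sq_dist, axis=1)              # BxM
    return tf.reduce_mean(pred_to_gt) + tf.reduce_mean(gt_to_pred)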
def get_model(point_cloud, is_training, bn_decay=None):
    """ ConvNet baseline, input is BxNx3 gray image """
    #import pdb
    #pdb.set_trace()

    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    point_dim = point_cloud.get_shape()[2].value   # per-point channel count (3 for raw XYZ)

    input_image = tf.expand_dims(point_cloud, -1)
    # CONV
    net = tf_util.conv2d(input_image,
                         64, [1, point_dim],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    #import pdb
    #pdb.set_trace()

    points_feat1 = tf_util.conv2d(net,
                                  1024, [1, 1],
                                  padding='VALID',
                                  stride=[1, 1],
                                  bn=True,
                                  is_training=is_training,
                                  scope='conv5',
                                  bn_decay=bn_decay)
    # MAX
    pc_feat1 = tf_util.max_pool2d(points_feat1, [num_point, 1],
                                  padding='VALID',
                                  scope='maxpool1')
    # FC
    pc_feat1 = tf.reshape(pc_feat1, [batch_size, -1])
    pc_feat1 = tf_util.fully_connected(pc_feat1,
                                       256,
                                       bn=True,
                                       is_training=is_training,
                                       scope='fc1',
                                       bn_decay=bn_decay)
    pc_feat1 = tf_util.fully_connected(pc_feat1,
                                       128,
                                       bn=True,
                                       is_training=is_training,
                                       scope='fc2',
                                       bn_decay=bn_decay)
    print(pc_feat1)

    # CONCAT
    pc_feat1_expand = tf.tile(tf.reshape(pc_feat1, [batch_size, 1, 1, -1]),
                              [1, num_point, 1, 1])
    points_feat1_concat = tf.concat(axis=3,
                                    values=[points_feat1, pc_feat1_expand])

    # CONV
    net = tf_util.conv2d(points_feat1_concat,
                         512, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv6')
    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv7')
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.conv2d(net,
                         classes, [1, 1],   # classes: module-level constant, number of semantic output classes
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=None,
                         scope='conv8')
    net = tf.squeeze(net, [2])

    return net
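# The function above expects a module-level `classes` constant (number of
# semantic classes). A minimal per-point loss sketch follows; the value 13 and
# the helper name are only illustrative assumptions, not from the source.
classes = 13

def get_sem_seg_loss(pred, label):
    """pred: B x N x classes logits, label: BxN integer class ids."""
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    return tf.reduce_mean(loss)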
def get_model(point_cloud, one_hot_vec, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx4, onehotvec is Bx3, output BxNx2 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    input_image = tf.expand_dims(point_cloud, -1)

    net = tf_util.conv2d(input_image, 64, [1,6],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    point_feat = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(point_feat, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point,1],
                                     padding='VALID', scope='maxpool')
    print(global_feat)

    global_feat = tf.concat([global_feat, tf.expand_dims(tf.expand_dims(one_hot_vec, 1), 1)], axis=3)
    print('Global Feat: ', global_feat)
    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    print(point_feat, global_feat_expand)
    concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])
    print(concat_feat)

    net = tf_util.conv2d(concat_feat, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv8', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv9', bn_decay=bn_decay)
    net = tf_util.dropout(net, is_training, 'dp1', keep_prob=0.5)

    logits = tf_util.conv2d(net, 2, [1,1],
                         padding='VALID', stride=[1,1], activation_fn=None,
                         scope='conv10')
    logits = tf.squeeze(logits, [2]) # BxNxC
    print(logits)

    print('-----------')
    #net = tf.concat(axis=3, values=[net, tf.expand_dims(tf.slice(point_cloud, [0,0,0], [-1,-1,3]), 2)])
    mask = tf.slice(logits,[0,0,0],[-1,-1,1]) < tf.slice(logits,[0,0,1],[-1,-1,1])
    mask = tf.to_float(mask) # BxNx1
    mask_count = tf.tile(tf.reduce_sum(mask,axis=1,keep_dims=True), [1,1,3]) # Bx1x3
    print(mask)
    point_cloud_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3]) # BxNx3

    # ---- Subtract points mean ----
    mask_xyz_mean = tf.reduce_sum(tf.tile(mask, [1,1,3])*point_cloud_xyz, axis=1, keep_dims=True) # Bx1x3
    mask_xyz_mean = mask_xyz_mean/tf.maximum(mask_count,1) # Bx1x3
    point_cloud_xyz_stage1 = point_cloud_xyz - tf.tile(mask_xyz_mean, [1,num_point,1])
    print('Point cloud xyz stage1: ', point_cloud_xyz_stage1)

    # ---- Regress 1st stage center ----
    net = tf.expand_dims(point_cloud_xyz_stage1, 2)
    print(net)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg1-stage1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg2-stage1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg3-stage1', bn_decay=bn_decay)
    mask_expand = tf.tile(tf.expand_dims(mask,-1), [1,1,1,256])
    masked_net = net*mask_expand
    print(masked_net)
    net = tf_util.max_pool2d(masked_net, [num_point,1], padding='VALID', scope='maxpool-stage1')
    net = tf.squeeze(net, axis=[1,2])
    print(net)
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net, 256, scope='fc1-stage1', bn=True, is_training=is_training, bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 128, scope='fc2-stage1', bn=True, is_training=is_training, bn_decay=bn_decay)
    stage1_center = tf_util.fully_connected(net, 3, activation_fn=None, scope='fc3-stage1')
    stage1_center = stage1_center + tf.squeeze(mask_xyz_mean, axis=1) # Bx3
    end_points['stage1_center'] = stage1_center

    # ---- Subtract stage1 center ----
    point_cloud_xyz_submean = point_cloud_xyz - tf.expand_dims(stage1_center, 1)
    print('Point cloud xyz submean: ', point_cloud_xyz_submean)

    net = tf.expand_dims(point_cloud_xyz_submean, 2)
    print(net)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg4', bn_decay=bn_decay)
    mask_expand = tf.tile(tf.expand_dims(mask,-1), [1,1,1,512])
    masked_net = net*mask_expand
    print(masked_net)
    net = tf_util.max_pool2d(masked_net, [num_point,1], padding='VALID', scope='maxpool2')
    net = tf.squeeze(net, axis=[1,2])
    print(net)
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net, 512, scope='fc1', bn=True, is_training=is_training, bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, scope='fc2', bn=True, is_training=is_training, bn_decay=bn_decay)

    # First 3 are cx,cy,cz, next NUM_HEADING_BIN*2 are for heading
    # next NUM_SIZE_CLUSTER*4 are for dimension
    output = tf_util.fully_connected(net, 3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4, activation_fn=None, scope='fc3')
    print(output)

    center = tf.slice(output, [0,0], [-1,3])
    center = center + stage1_center # Bx3
    end_points['center'] = center

    heading_scores = tf.slice(output, [0,3], [-1,NUM_HEADING_BIN])
    heading_residuals_normalized = tf.slice(output, [0,3+NUM_HEADING_BIN], [-1,NUM_HEADING_BIN])
    end_points['heading_scores'] = heading_scores # BxNUM_HEADING_BIN
    end_points['heading_residuals_normalized'] = heading_residuals_normalized # BxNUM_HEADING_BIN (should be -1 to 1)
    end_points['heading_residuals'] = heading_residuals_normalized * (np.pi/NUM_HEADING_BIN) # BxNUM_HEADING_BIN
    
    size_scores = tf.slice(output, [0,3+NUM_HEADING_BIN*2], [-1,NUM_SIZE_CLUSTER]) # BxNUM_SIZE_CLUSTER
    size_residuals_normalized = tf.slice(output, [0,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER], [-1,NUM_SIZE_CLUSTER*3])
    size_residuals_normalized = tf.reshape(size_residuals_normalized, [batch_size, NUM_SIZE_CLUSTER, 3]) # BxNUM_SIZE_CLUSTERx3
    end_points['size_scores'] = size_scores
    end_points['size_residuals_normalized'] = size_residuals_normalized
    end_points['size_residuals'] = size_residuals_normalized * tf.expand_dims(tf.constant(mean_size_arr, dtype=tf.float32), 0)

    return logits, end_points
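# Hedged inference-time sketch of decoding the heading outputs above: pick the
# highest-scoring bin and add its residual to the bin centre (bins are spaced
# 2*pi/NUM_HEADING_BIN apart). NUM_HEADING_BIN is the same module-level constant
# used above; the helper name and the decoding details are assumptions, not
# code from the original model.
def decode_heading(heading_scores, heading_residuals):
    """heading_scores / heading_residuals: B x NUM_HEADING_BIN -> B angles (rad)."""
    bin_id = tf.argmax(heading_scores, axis=1)                           # B
    bin_centre = tf.to_float(bin_id) * (2 * np.pi / NUM_HEADING_BIN)     # B
    residual = tf.reduce_sum(
        heading_residuals * tf.one_hot(bin_id, depth=NUM_HEADING_BIN), axis=1)
    return bin_centre + residual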