Example #1
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Set abstraction layers
    # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4).
    # So we only use NCHW for layer 1 until this issue can be resolved.
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True)
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
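A minimal usage sketch (not part of the original example): it assumes TensorFlow 1.x and that this get_model plus the repo's tf_util/pointnet_util modules are importable; pointclouds_pl, labels_pl and the shape constants are illustrative names.

import tensorflow as tf

BATCH_SIZE, NUM_POINT = 16, 1024
pointclouds_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
labels_pl = tf.placeholder(tf.int32, shape=(BATCH_SIZE,))
is_training_pl = tf.placeholder(tf.bool, shape=())

# Build the classification graph: pred is a (BATCH_SIZE, 40) logits tensor
pred, end_points = get_model(pointclouds_pl, is_training_pl, bn_decay=None)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=labels_pl))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)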
Example #2
def get_model(point_cloud, is_training, num_class, bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Layer 1
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1, nsample=32, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=256, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=0.4, nsample=32, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=16, radius=0.8, nsample=32, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')

    # Feature Propagation layers
    l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points
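A hedged sketch of how the per-point logits from this segmentation variant could be trained, again assuming TensorFlow 1.x; NUM_CLASSES and the placeholder names below are illustrative, not part of the original example.

import tensorflow as tf

BATCH_SIZE, NUM_POINT, NUM_CLASSES = 8, 4096, 21
pointclouds_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
labels_pl = tf.placeholder(tf.int32, shape=(BATCH_SIZE, NUM_POINT))
is_training_pl = tf.placeholder(tf.bool, shape=())

logits, end_points = get_model(pointclouds_pl, is_training_pl, NUM_CLASSES)  # (B, N, NUM_CLASSES)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels_pl))
pred_labels = tf.argmax(logits, axis=2)  # (B, N) predicted class ids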
Example #3
def get_model(point_cloud, is_training, bn_decay=None):
    """ Part segmentation PointNet, input is BxNx6 (XYZ NormalX NormalY NormalZ), output Bx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])

    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=64, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Feature Propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points
Example #4
def get_model(point_cloud, cls_label, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])

    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1,0.2,0.4], [32,64,128], [[32,32,64], [64,64,128], [64,96,128]], is_training, bn_decay, scope='layer1')
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.4,0.8], [64,128], [[128,128,256],[128,196,256]], is_training, bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Feature propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')

    cls_label_one_hot = tf.one_hot(cls_label, depth=NUM_CATEGORIES, on_value=1.0, off_value=0.0)
    cls_label_one_hot = tf.reshape(cls_label_one_hot, [batch_size, 1, NUM_CATEGORIES])
    cls_label_one_hot = tf.tile(cls_label_one_hot, [1,num_point,1])
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([cls_label_one_hot, l0_xyz, l0_points],axis=-1), l1_points, [128,128], is_training, bn_decay, scope='fp_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points
Example #5
def get_3d_box_estimation_v2_net(object_point_cloud, one_hot_vec,
                                 is_training, bn_decay, end_points):
    ''' 3D Box Estimation PointNet v2 network.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            masked point clouds in object coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)
            including box centers, heading bin class scores and residuals,
            and size cluster scores and residuals
    ''' 
    # Gather object points
    batch_size = object_point_cloud.get_shape()[0].value

    l0_xyz = object_point_cloud
    l0_points = None
    # Set abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points,
        npoint=128, radius=0.2, nsample=64, mlp=[64,64,128],
        mlp2=None, group_all=False,
        is_training=is_training, bn_decay=bn_decay, scope='ssg-layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points,
        npoint=32, radius=0.4, nsample=64, mlp=[128,128,256],
        mlp2=None, group_all=False,
        is_training=is_training, bn_decay=bn_decay, scope='ssg-layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points,
        npoint=None, radius=None, nsample=None, mlp=[256,256,512],
        mlp2=None, group_all=True,
        is_training=is_training, bn_decay=bn_decay, scope='ssg-layer3')

    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net, 512, bn=True,
        is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=True,
        is_training=is_training, scope='fc2', bn_decay=bn_decay)

    # The first 3 numbers: box center coordinates (cx,cy,cz),
    # the next NUM_HEADING_BIN*2:  heading bin class scores and bin residuals
    # next NUM_SIZE_CLUSTER*4: box cluster scores and residuals
    output = tf_util.fully_connected(net,
        3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4, activation_fn=None, scope='fc3')
    return output, end_points
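The comment above fixes the layout of the 3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4 output vector; the following is a hedged sketch of how a caller could slice the returned tensor back into its components, mirroring the Frustum PointNets convention (batch_size, NUM_HEADING_BIN and NUM_SIZE_CLUSTER are the caller's/module's values; the variable names are illustrative).

center = tf.slice(output, [0, 0], [-1, 3])                           # (B, 3) box centers
heading_scores = tf.slice(output, [0, 3], [-1, NUM_HEADING_BIN])     # (B, NUM_HEADING_BIN)
heading_residuals = tf.slice(output, [0, 3 + NUM_HEADING_BIN],
                             [-1, NUM_HEADING_BIN])                  # (B, NUM_HEADING_BIN)
size_scores = tf.slice(output, [0, 3 + NUM_HEADING_BIN * 2],
                       [-1, NUM_SIZE_CLUSTER])                       # (B, NUM_SIZE_CLUSTER)
size_residuals = tf.reshape(
    tf.slice(output, [0, 3 + NUM_HEADING_BIN * 2 + NUM_SIZE_CLUSTER],
             [-1, NUM_SIZE_CLUSTER * 3]),
    [batch_size, NUM_SIZE_CLUSTER, 3])                               # (B, NUM_SIZE_CLUSTER, 3)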
Example #6
def get_instance_seg_v2_net(point_cloud, one_hot_vec,
                            is_training, bn_decay, end_points):
    ''' 3D instance segmentation PointNet v2 network.
    Input:
        point_cloud: TF tensor in shape (B,N,4)
            frustum point clouds with XYZ and intensity in point channels
            XYZs are in frustum coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
        is_training: TF boolean scalar
        bn_decay: TF float scalar
        end_points: dict
    Output:
        logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
        end_points: dict
    '''

    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,1])

    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points,
        128, [0.2,0.4,0.8], [32,64,128],
        [[32,32,64], [64,64,128], [64,96,128]],
        is_training, bn_decay, scope='layer1')
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points,
        32, [0.4,0.8,1.6], [64,64,128],
        [[64,64,128], [128,128,256], [128,128,256]],
        is_training, bn_decay, scope='layer2')
    l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz, l2_points,
        npoint=None, radius=None, nsample=None, mlp=[128,256,1024],
        mlp2=None, group_all=True, is_training=is_training,
        bn_decay=bn_decay, scope='layer3')

    # Feature Propagation layers
    l3_points = tf.concat([l3_points, tf.expand_dims(one_hot_vec, 1)], axis=2)
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points,
        [128,128], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points,
        [128,128], is_training, bn_decay, scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz,
        tf.concat([l0_xyz,l0_points],axis=-1), l1_points,
        [128,128], is_training, bn_decay, scope='fa_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True,
        is_training=is_training, scope='conv1d-fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.7,
        is_training=is_training, scope='dp1')
    logits = tf_util.conv1d(net, 2, 1,
        padding='VALID', activation_fn=None, scope='conv1d-fc2')

    return logits, end_points
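A short hedged sketch of how a caller might consume the returned (B, N, 2) logits, assuming TensorFlow 1.x; mask_label_pl and the shape constants are illustrative.

BATCH_SIZE, NUM_POINT = 32, 1024
mask_label_pl = tf.placeholder(tf.int32, shape=(BATCH_SIZE, NUM_POINT))  # 0 = clutter, 1 = object
seg_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=mask_label_pl))
pred_mask = tf.argmax(logits, axis=2)  # (B, N): 1 where a point is predicted to belong to the object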
Example #7
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    l0_xyz = point_cloud
    l0_points = None

    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1,0.2,0.4], [16,32,128], [[32,32,64], [64,64,128], [64,96,128]], is_training, bn_decay, scope='layer1', use_nchw=True)
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.2,0.4,0.8], [32,64,128], [[64,64,128], [128,128,256], [128,128,256]], is_training, bn_decay, scope='layer2')
    l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
Example #8
def get_model(point_cloud,
              is_training,
              use_normal=False,
              bn_decay=None,
              weight_decay=None,
              num_class=40,
              adaptive_sample=False):
    """ Classification PointNet, input is BxNx3, output Bx40 """

    batch_size = point_cloud.get_shape()[0].value
    end_points = {}
    if use_normal:
        l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
        l0_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, 3])
    else:
        l0_xyz = point_cloud
        l0_points = point_cloud

    end_points['l0_xyz'] = l0_xyz
    as_neighbor = [12, 12] if adaptive_sample else [0, 0]

    # Set abstraction layers
    l1_xyz, l1_points = PointASNLSetAbstraction(l0_xyz,
                                                l0_points,
                                                npoint=512,
                                                nsample=32,
                                                mlp=[64, 64, 128],
                                                is_training=is_training,
                                                bn_decay=bn_decay,
                                                weight_decay=weight_decay,
                                                scope='layer1',
                                                as_neighbor=as_neighbor[0])
    end_points['l1_xyz'] = l1_xyz
    l2_xyz, l2_points = PointASNLSetAbstraction(l1_xyz,
                                                l1_points,
                                                npoint=128,
                                                nsample=64,
                                                mlp=[128, 128, 256],
                                                is_training=is_training,
                                                bn_decay=bn_decay,
                                                weight_decay=weight_decay,
                                                scope='layer2',
                                                as_neighbor=as_neighbor[1])
    end_points['l2_xyz'] = l2_xyz
    _, l3_points_res, _ = pointnet_sa_module(l1_xyz,
                                             l1_points,
                                             npoint=None,
                                             radius=None,
                                             nsample=None,
                                             mlp=[128, 256, 512],
                                             mlp2=None,
                                             group_all=True,
                                             is_training=is_training,
                                             bn_decay=bn_decay,
                                             scope='layer3_1')
    _, l3_points, _ = pointnet_sa_module(l2_xyz,
                                         l2_points,
                                         npoint=None,
                                         radius=None,
                                         nsample=None,
                                         mlp=[256, 512, 1024],
                                         mlp2=None,
                                         group_all=True,
                                         is_training=is_training,
                                         bn_decay=bn_decay,
                                         scope='layer3_2')

    # Fully connected layers
    l3_points = tf.reshape(l3_points, [batch_size, -1])
    l3_points_res = tf.reshape(l3_points_res, [batch_size, -1])
    net = tf.concat([l3_points, l3_points_res], axis=-1)
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.4,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.4,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net,
                                  num_class,
                                  activation_fn=None,
                                  scope='fc3')

    return net, end_points
Example #9
def corrs_flow_pred_net(xyz1,
                        xyz2,
                        net1,
                        net2,
                        scopename,
                        reuse,
                        is_training,
                        bn_decay,
                        nsmp=256,
                        nfea=64):
    #########################################
    # input
    #   xyz1, xyz2: (B x N x 3)
    #   net1, net2: (B x N x nfea)
    # output
    #   pred_flow: (B x N x 3)
    #   pred_vismask: (B x N)
    #   fpsidx1, fpsidx2: (B x nsmp)
    #   matching_score_sub: (B x nsmp x nsmp)
    #########################################
    num_point = xyz1.get_shape()[1].value
    with tf.variable_scope(scopename) as myscope:
        if reuse:
            myscope.reuse_variables()
        # sub-sample to predict vismask and flow
        fpsidx1 = farthest_point_sample(nsmp, xyz1)  # Bxnsmp
        idx = tf.where(tf.greater_equal(fpsidx1, 0))
        fpsidx1 = tf.concat((tf.expand_dims(tf.cast(
            idx[:, 0], tf.int32), -1), tf.reshape(fpsidx1, [-1, 1])), 1)
        xyz1_sub = tf.reshape(tf.gather_nd(xyz1, fpsidx1), [-1, nsmp, 3])
        net1_sub = tf.reshape(tf.gather_nd(net1, fpsidx1), [-1, nsmp, nfea])

        fpsidx2 = farthest_point_sample(nsmp, xyz2)  # Bxnsmp
        idx = tf.where(tf.greater_equal(fpsidx2, 0))
        fpsidx2 = tf.concat((tf.expand_dims(tf.cast(
            idx[:, 0], tf.int32), -1), tf.reshape(fpsidx2, [-1, 1])), 1)
        xyz2_sub = tf.reshape(tf.gather_nd(xyz2, fpsidx2), [-1, nsmp, 3])
        net2_sub = tf.reshape(tf.gather_nd(net2, fpsidx2), [-1, nsmp, nfea])

        net_combined_sub = tf.concat(
            (tf.tile(tf.expand_dims(net1_sub, 2), [1, 1, nsmp, 1]),
             tf.tile(tf.expand_dims(net2_sub, 1), [1, nsmp, 1, 1])), -1)

        mlp_maskpred = [128, 128, 128]
        for i, num_out_channel in enumerate(mlp_maskpred):
            net_combined_sub = tf_util.conv2d(net_combined_sub,
                                              num_out_channel, [1, 1],
                                              padding='VALID',
                                              stride=[1, 1],
                                              bn=True,
                                              is_training=is_training,
                                              scope='conv%d_maskpred' % (i),
                                              bn_decay=bn_decay)
        pred_vismask_sub = tf.reduce_max(net_combined_sub, 2, keep_dims=True)

        mlp2_maskpred = [128, 64, 32]

        for i, num_out_channel in enumerate(mlp2_maskpred):
            pred_vismask_sub = tf_util.conv2d(pred_vismask_sub,
                                              num_out_channel, [1, 1],
                                              padding='VALID',
                                              stride=[1, 1],
                                              bn=True,
                                              is_training=is_training,
                                              scope='conv_post_%d_maskpred' %
                                              (i),
                                              bn_decay=bn_decay)

        pred_vismask_sub = tf_util.conv2d(pred_vismask_sub,
                                          1, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          scope='conv_out_maskpred',
                                          activation_fn=None)
        pred_vismask_sub = tf.squeeze(pred_vismask_sub, [2])
        pred_vismask = pointnet_fp_module(xyz1,
                                          xyz1_sub,
                                          None,
                                          pred_vismask_sub, [],
                                          tf.constant(True),
                                          None,
                                          scope='interp_layer')
        pred_vismask = tf.squeeze(pred_vismask, 2)  # B x N
        pred_vismask_sub = tf.stop_gradient(
            tf.sigmoid(pred_vismask_sub))  # B x nsmp x 1

        mlp0 = [8]
        for i, num_out_channel in enumerate(mlp0):
            net_combined_sub = tf_util.conv2d(net_combined_sub,
                                              num_out_channel, [1, 1],
                                              padding='VALID',
                                              stride=[1, 1],
                                              bn=True,
                                              is_training=is_training,
                                              scope='conv_prev_%d' % (i),
                                              bn_decay=bn_decay)
        net_combined_sub = tf_util.conv2d(net_combined_sub,
                                          1, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          scope='conv_prev_3',
                                          activation_fn=None)
        U = tf.nn.softmax(net_combined_sub, 2)  # B x nsmp x nsmp x 1
        matching_score_sub = tf.squeeze(net_combined_sub, -1)

        # mask prob
        U = tf.concat(
            (tf.multiply(U, tf.expand_dims(pred_vismask_sub, 2)),
             tf.expand_dims(xyz2_sub, 1) - tf.expand_dims(xyz1_sub, 2)), -1)

        mlp = [32, 64, 128, 256]
        for i, num_out_channel in enumerate(mlp):
            U = tf_util.conv2d(U,
                               num_out_channel, [1, 1],
                               padding='VALID',
                               stride=[1, 1],
                               bn=True,
                               is_training=is_training,
                               scope='conv%d' % (i),
                               bn_decay=bn_decay)
        U = tf.reduce_max(U, 2)
        l1_xyz = xyz1_sub

        # mask energy
        l1_points = tf.concat((U, pred_vismask_sub), -1)

        l2_xyz, l2_points, l2_indices = pointnet_sa_module(
            l1_xyz,
            l1_points,
            npoint=128,
            radius=0.4,
            nsample=32,
            mlp=[128, 128, 256],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='corrs_layer2')
        l3_xyz, l3_points, l3_indices = pointnet_sa_module(
            l2_xyz,
            l2_points,
            npoint=None,
            radius=None,
            nsample=None,
            mlp=[256, 512, 1024],
            mlp2=None,
            group_all=True,
            use_xyz=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='corrs_layer3')
        # Feature Propagation layers
        l2_points = pointnet_fp_module(l2_xyz,
                                       l3_xyz,
                                       l2_points,
                                       l3_points, [256, 256],
                                       is_training,
                                       bn_decay,
                                       scope='corrs_fa_layer1')
        l1_points = pointnet_fp_module(l1_xyz,
                                       l2_xyz,
                                       l1_points,
                                       l2_points, [256, 128],
                                       is_training,
                                       bn_decay,
                                       scope='corrs_fa_layer2')
        l0_points = pointnet_fp_module(xyz1,
                                       l1_xyz,
                                       None,
                                       l1_points, [128, 128, 64],
                                       is_training,
                                       bn_decay,
                                       scope='corrs_fa_layer3')
        # FC layers
        net = tf_util.conv1d(l0_points,
                             64,
                             1,
                             padding='VALID',
                             bn=True,
                             is_training=is_training,
                             scope='corrs_fc1',
                             bn_decay=bn_decay)
        net = tf_util.conv1d(net,
                             3,
                             1,
                             padding='VALID',
                             activation_fn=None,
                             scope='corrs_fc2')
        pred_flow = tf.reshape(net, [-1, num_point, 3])
    return pred_flow, pred_vismask, fpsidx1, fpsidx2, matching_score_sub
Example #10
def ocEncoder_PointNET2_multilevel256_3mlp(input_points,
                                           verbose=True,
                                           is_training=None,
                                           bn_decay=None):

    l0_xyz = input_points
    l0_points = None

    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=512,
                                                       radius=0.1,
                                                       nsample=64,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer1',
                                                       bn=False)

    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=256,
                                                       radius=0.2,
                                                       nsample=64,
                                                       mlp=[128, 128, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer2',
                                                       bn=False)

    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=128,
                                                       radius=0.3,
                                                       nsample=64,
                                                       mlp=[256, 256, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer3',
                                                       bn=False)

    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz,
                                                       l3_points,
                                                       npoint=32,
                                                       radius=0.4,
                                                       nsample=64,
                                                       mlp=[256, 256, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer4',
                                                       bn=False)

    output_1 = encoder_with_convs_and_symmetry(l1_points,
                                               n_filters=[128, 128, 64])
    output_2 = encoder_with_convs_and_symmetry(l2_points,
                                               n_filters=[256, 256, 64])
    output_3 = encoder_with_convs_and_symmetry(l3_points,
                                               n_filters=[256, 256, 64])
    output_4 = encoder_with_convs_and_symmetry(l4_points,
                                               n_filters=[256, 256, 64])

    output_1234 = tf.concat([output_1, output_2, output_3, output_4], axis=1)

    if verbose:
        print('output_1.shape = %s' % output_1.shape)
        print('output_2.shape = %s' % output_2.shape)
        print('output_3.shape = %s' % output_3.shape)
        print('output_4.shape = %s' % output_4.shape)

    return output_1234
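A usage sketch under the assumption that encoder_with_convs_and_symmetry max-pools each level down to a (B, 64) code, so the concatenation is a (B, 256) multi-level shape descriptor; the placeholder names and shapes are illustrative.

points_pl = tf.placeholder(tf.float32, shape=(8, 2048, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())
code = ocEncoder_PointNET2_multilevel256_3mlp(points_pl,
                                              verbose=False,
                                              is_training=is_training_pl)
# code: (8, 256) shape code, i.e. 4 abstraction levels x 64 dims each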
Example #11
def get_model_class(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Set abstraction layers
    # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4).
    # So we only use NCHW for layer 1 until this issue can be resolved.
    # npoint = 512, 128; nsample = 32, 64
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=512,
                                                       radius=0.2,
                                                       nsample=64,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope=('layer1'),
                                                       scope_reuse=False,
                                                       use_nchw=True)
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=128,
                                                       radius=0.4,
                                                       nsample=32,
                                                       mlp=[128, 128, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope=('layer2'),
                                                       scope_reuse=False)
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=None,
                                                       radius=None,
                                                       nsample=None,
                                                       mlp=[256, 512, 1024],
                                                       mlp2=None,
                                                       group_all=True,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope=('layer3'),
                                                       scope_reuse=False)

    # Fully connected layers for classification
    net_class = tf.reshape(l3_points, [batch_size, -1])
    net_class = tf_util.fully_connected(net_class,
                                        512,
                                        bn=True,
                                        is_training=is_training,
                                        scope='fc1_class',
                                        bn_decay=bn_decay)  #512
    net_class = tf_util.dropout(net_class,
                                keep_prob=0.5,
                                is_training=is_training,
                                scope='dp1_class')
    net_class = tf_util.fully_connected(net_class,
                                        256,
                                        bn=True,
                                        is_training=is_training,
                                        scope='fc2_class',
                                        bn_decay=bn_decay)
    net_class = tf_util.dropout(net_class,
                                keep_prob=0.5,
                                is_training=is_training,
                                scope='dp2_class')  #256
    net_class = tf_util.fully_connected(net_class,
                                        10,
                                        activation_fn=None,
                                        scope='fc3_class')

    return net_class, end_points
Example #12
def get_displacements(input_points,
                      ske_features,
                      is_training,
                      FLAGS,
                      bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """

    batch_size = FLAGS.batch_size
    num_points = FLAGS.point_num_out

    point_cloud = input_points

    l0_xyz = point_cloud
    l0_points = None

    # Set Abstraction layers: the first level downsamples the 2048 input points to 1024 points
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(
        l0_xyz,
        l0_points,
        npoint=1024,
        radius=0.1 * FLAGS.radiusScal,
        nsample=64,
        mlp=[64, 64, 128],
        mlp2=None,
        group_all=False,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer1')  # the last argument, scope, acts as a prefix for the layer's variable names
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=384,
                                                       radius=0.2 *
                                                       FLAGS.radiusScal,
                                                       nsample=64,
                                                       mlp=[128, 128, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=128,
                                                       radius=0.4 *
                                                       FLAGS.radiusScal,
                                                       nsample=64,
                                                       mlp=[256, 256, 512],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer3')

    # PointNet
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz,
                                                       l3_points,
                                                       npoint=None,
                                                       radius=None,
                                                       nsample=None,
                                                       mlp=[512, 512, 1024],
                                                       mlp2=None,
                                                       group_all=True,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer4')

    # Feature Propagation layers: feature maps are interpolated according to point coordinates
    # Interpolate l3 features from the l4 features
    l3_points = pointnet_fp_module(l3_xyz,
                                   l4_xyz,
                                   l3_points,
                                   l4_points, [512, 512],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz,
                                   l3_xyz,
                                   l2_points,
                                   l3_points, [512, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz,
                                   l2_xyz,
                                   l1_points,
                                   l2_points, [256, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz,
                                   l1_xyz,
                                   l0_points,
                                   l1_points, [128, 128, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer4')

    # Append the extracted skeleton features
    # ske_features: batch_size x num_features
    ske_features = tf.tile(tf.expand_dims(ske_features, 1), [1, num_points, 1])
    l0_points = tf.concat([l0_points, ske_features], axis=-1)
    # Convert the per-point features into displacements
    net = tf_util.conv1d(l0_points,
                         128,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='fc1',
                         bn_decay=bn_decay)
    net = tf_util.conv1d(net,
                         64,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='fc2',
                         bn_decay=bn_decay)
    net = tf_util.conv1d(net,
                         3,
                         1,
                         padding='VALID',
                         activation_fn=None,
                         scope='fc3')

    displacements = tf.sigmoid(net) * FLAGS.range_max * 2 - FLAGS.range_max

    return displacements
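The final line squashes the raw network output through a sigmoid into the symmetric interval (-FLAGS.range_max, FLAGS.range_max); a small NumPy sanity check of that mapping (range_max = 0.5 is an assumed value, not taken from the example):

import numpy as np

range_max = 0.5
raw = np.array([-10.0, 0.0, 10.0])
disp = 1.0 / (1.0 + np.exp(-raw)) * range_max * 2 - range_max
# disp ~= [-0.5, 0.0, 0.5]: displacements always stay inside (-range_max, range_max)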
Example #13
def get_model(point_cloud,
              is_training,
              scope='',
              num_point=None,
              bn_decay=None,
              ifglobal=False,
              bn=True,
              end_points={}):
    """ Part segmentation PointNet, input is BxNx6 (XYZ NormalX NormalY NormalZ), output Bx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
    l0_points = None  #tf.slice(point_cloud, [0,0,3], [-1,-1,3])

    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=512,
                                                       radius=0.2,
                                                       nsample=64,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=128,
                                                       radius=0.4,
                                                       nsample=64,
                                                       mlp=[128, 128, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=None,
                                                       radius=None,
                                                       nsample=None,
                                                       mlp=[256, 512, 1024],
                                                       mlp2=None,
                                                       group_all=True,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer3')

    # Feature Propagation layers
    l2_points = pointnet_fp_module(l2_xyz,
                                   l3_xyz,
                                   l2_points,
                                   l3_points, [256, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz,
                                   l2_xyz,
                                   l1_points,
                                   l2_points, [256, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer2')
    # l0_points is None here (only XYZ is used), so guard the skip-link concat
    # instead of concatenating with None
    l0_skip = l0_xyz if l0_points is None else tf.concat([l0_xyz, l0_points], axis=-1)
    l0_points = pointnet_fp_module(l0_xyz,
                                   l1_xyz,
                                   l0_skip,
                                   l1_points, [128, 128, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points,
                         128,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='fc1',
                         bn_decay=bn_decay)
    end_points['embedding'] = net
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.conv1d(net,
                         50,
                         1,
                         padding='VALID',
                         activation_fn=None,
                         scope='fc2')

    return net, end_points
Example #14
def get_model(point_cloud, is_training, NUM_CLASSES, normal, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
    if normal:
        l0_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, 3])
    else:
        l0_points = None

    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(
        l0_xyz,
        l0_points,
        512, [0.1, 0.2, 0.4], [64, 128, 128],
        [[32, 32, 64], [64, 64, 128], [64, 96, 128]],
        is_training,
        bn_decay,
        scope='layer1',
        use_nchw=True)
    l2_xyz, l2_points = pointnet_sa_module_msg(
        l1_xyz,
        l1_points,
        128, [0.2, 0.4, 0.8], [128, 128, 256],
        [[64, 64, 128], [128, 128, 256], [128, 128, 256]],
        is_training,
        bn_decay,
        scope='layer2')
    l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz,
                                              l2_points,
                                              npoint=None,
                                              radius=None,
                                              nsample=None,
                                              mlp=[256, 512, 1024],
                                              mlp2=None,
                                              group_all=True,
                                              is_training=is_training,
                                              bn_decay=bn_decay,
                                              scope='layer3')

    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.4,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.4,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net,
                                  NUM_CLASSES,
                                  activation_fn=None,
                                  scope='fc3')

    return net, end_points
Example #15
def get_model_scannet(point_cloud, is_training, bn=True, bn_decay=None):
    """ ConvNet baseline, input is BxNx9 gray image """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    l0_xyz = point_cloud[:, :, :3]
    l0_points = None  #point_cloud[:, :, 3:6]

    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=1024,
                                                       radius=0.1,
                                                       nsample=32,
                                                       mlp=[32, 32, 64],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=256,
                                                       radius=0.2,
                                                       nsample=32,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=64,
                                                       radius=0.4,
                                                       nsample=32,
                                                       mlp=[128, 128, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz,
                                                       l3_points,
                                                       npoint=16,
                                                       radius=0.8,
                                                       nsample=32,
                                                       mlp=[256, 256, 512],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer4')

    # Feature Propagation layers
    l3_points = pointnet_fp_module(l3_xyz,
                                   l4_xyz,
                                   l3_points,
                                   l4_points, [256, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz,
                                   l3_xyz,
                                   l2_points,
                                   l3_points, [256, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz,
                                   l2_xyz,
                                   l1_points,
                                   l2_points, [256, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz,
                                   l1_xyz,
                                   l0_points,
                                   l1_points, [128, 128, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer4')

    return l0_points
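This backbone returns per-point features rather than class scores; a hedged sketch of the kind of head a caller could attach on top, mirroring the FC-layer pattern of the other examples (NUM_CLASSES, pointclouds_pl, is_training_pl and bn_decay are illustrative names assumed to be defined as in the earlier sketches):

feats = get_model_scannet(pointclouds_pl, is_training_pl, bn_decay=bn_decay)  # (B, N, 128)
net = tf_util.conv1d(feats, 128, 1, padding='VALID', bn=True,
                     is_training=is_training_pl, scope='seg_fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training_pl, scope='seg_dp1')
seg_logits = tf_util.conv1d(net, NUM_CLASSES, 1, padding='VALID',
                            activation_fn=None, scope='seg_fc2')  # (B, N, NUM_CLASSES)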
Example #16
def get_model(point_cloud,
              is_training,
              num_class,
              bn_decay=None,
              gripper_feat=None,
              env_feat=None):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Layer 1
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=512,
                                                       radius=0.01,
                                                       nsample=32,
                                                       mlp=[32, 32, 64],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=256,
                                                       radius=0.02,
                                                       nsample=32,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=128,
                                                       radius=0.04,
                                                       nsample=32,
                                                       mlp=[128, 128, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz,
                                                       l3_points,
                                                       npoint=64,
                                                       radius=0.08,
                                                       nsample=32,
                                                       mlp=[256, 256, 512],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer4')
    l5_xyz, l5_points, l5_indices = pointnet_sa_module(l4_xyz,
                                                       l4_points,
                                                       npoint=48,
                                                       radius=0.16,
                                                       nsample=32,
                                                       mlp=[512, 512, 1024],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer5')
    l6_xyz, l6_points, l6_indices = pointnet_sa_module(l5_xyz,
                                                       l5_points,
                                                       npoint=4,
                                                       radius=0.20,
                                                       nsample=32,
                                                       mlp=[1024, 1024, 2048],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer6')

    # Broadcast the global gripper/environment features to every point kept by
    # layer6 (npoint=4), then append them to the deepest per-point features.
    if env_feat is None:
        extra_feat = gripper_feat  # assumes gripper_feat is provided in this case
    else:
        extra_feat = tf.concat([gripper_feat, env_feat], axis=-1)
    extra_feat = tf.expand_dims(extra_feat, axis=1)
    extra_feat = tf.tile(extra_feat, [1, 4, 1])  # 4 = npoint of layer6

    l6_points = tf.concat([l6_points, extra_feat], axis=-1)

    # Feature Propagation layers
    l5_points = pointnet_fp_module(l5_xyz,
                                   l6_xyz,
                                   l5_points,
                                   l6_points, [2048, 2048, 1024],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer5',
                                   bn=True)
    l4_points = pointnet_fp_module(l4_xyz,
                                   l5_xyz,
                                   l4_points,
                                   l5_points, [1024, 1024, 512],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer0',
                                   bn=True)
    l3_points = pointnet_fp_module(l3_xyz,
                                   l4_xyz,
                                   l3_points,
                                   l4_points, [512, 512, 384],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer1',
                                   bn=True)
    l2_points = pointnet_fp_module(l2_xyz,
                                   l3_xyz,
                                   l2_points,
                                   l3_points, [384, 384, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer2',
                                   bn=True)
    l1_points = pointnet_fp_module(l1_xyz,
                                   l2_xyz,
                                   l1_points,
                                   l2_points, [256, 256, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer3',
                                   bn=True)
    l0_points = pointnet_fp_module(l0_xyz,
                                   l1_xyz,
                                   l0_points,
                                   l1_points, [128, 128, 64],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer4',
                                   bn=True)
    #print(l0_points)
    # FC layers
    net = l0_points  #tf_util.conv1d(l0_points, 64, 1, padding='VALID', bn=False, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    #net = tf_util.conv1d(net, 64, 1, padding='VALID', bn=False, is_training=is_training, scope='fc1_1', bn_decay=bn_decay)
    end_points['feats'] = net

    #net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    return end_points
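
This variant stops at a per-point feature map (end_points['feats'], shape BxNx64) and leaves the prediction head commented out. Below is a hedged sketch of how a caller might attach such a head; the placeholders (point_cloud_pl, is_training_pl, gripper_feat_pl) and NUM_CLASSES are hypothetical names, and the head itself is my assumption rather than part of the original code.

# Sketch: attach a per-point classification head to the returned features.
end_points = get_model(point_cloud_pl, is_training_pl, num_class=NUM_CLASSES,
                       bn_decay=None, gripper_feat=gripper_feat_pl)
feats = end_points['feats']                                       # B x N x 64
logits = tf_util.conv1d(feats, NUM_CLASSES, 1, padding='VALID',
                        activation_fn=None, scope='seg_logits')   # B x N x NUM_CLASSES
pred_labels = tf.argmax(logits, axis=-1)                          # B x N
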
Exemple #17
def get_instance_seg_v2_net(point_cloud, one_hot_vec, is_training, bn_decay,
                            end_points):
    ''' 3D instance segmentation PointNet v2 network.
    Input:
        point_cloud: TF tensor in shape (B,N,4)
            frustum point clouds with XYZ and intensity in point channels
            XYZs are in frustum coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
        is_training: TF boolean scalar
        bn_decay: TF float scalar
        end_points: dict
    Output:
        logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
        end_points: dict
    '''

    l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
    l0_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, 1])

    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(
        l0_xyz,
        l0_points,
        128, [0.2, 0.4, 0.8], [1 * i for i in [32, 64, 128]],
        [[32, 32, 64], [64, 64, 128], [64, 96, 128]],
        is_training,
        bn_decay,
        scope='layer1')
    l2_xyz, l2_points = pointnet_sa_module_msg(
        l1_xyz,
        l1_points,
        32, [0.4, 0.8, 1.6], [1 * i for i in [64, 64, 128]],
        [[64, 64, 128], [128, 128, 256], [128, 128, 256]],
        is_training,
        bn_decay,
        scope='layer2')
    l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz,
                                              l2_points,
                                              npoint=None,
                                              radius=None,
                                              nsample=None,
                                              mlp=[128, 256, 1024],
                                              mlp2=None,
                                              group_all=True,
                                              is_training=is_training,
                                              bn_decay=bn_decay,
                                              scope='layer3')

    # Feature Propagation layers
    l3_points = tf.concat([l3_points, tf.expand_dims(one_hot_vec, 1)], axis=2)
    l2_points = pointnet_fp_module(l2_xyz,
                                   l3_xyz,
                                   l2_points,
                                   l3_points, [128, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz,
                                   l2_xyz,
                                   l1_points,
                                   l2_points, [128, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz,
                                   l1_xyz,
                                   tf.concat([l0_xyz, l0_points], axis=-1),
                                   l1_points, [128, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points,
                         128,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='conv1d-fc1',
                         bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    logits = tf_util.conv1d(net,
                            2,
                            1,
                            padding='VALID',
                            activation_fn=None,
                            scope='conv1d-fc2')

    return logits, end_points
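
The logits above score each frustum point as background/clutter vs. object. A hedged post-processing sketch (not part of the original) converts them into a binary mask:

# Sketch: turn the (B, N, 2) logits into a per-point object mask.
mask = tf.argmax(logits, axis=-1)                  # (B, N), 1 = object, 0 = clutter
mask = tf.cast(mask, tf.float32)
num_object_points = tf.reduce_sum(mask, axis=1)    # points kept per frustum
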
Exemple #18
def get_3d_box_estimation_v2_net(object_point_cloud, one_hot_vec, is_training,
                                 bn_decay, end_points):
    ''' 3D Box Estimation PointNet v2 network.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            masked point clouds in object coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)
            including box centers, heading bin class scores and residuals,
            and size cluster scores and residuals
    '''
    # Gather object points
    batch_size = object_point_cloud.get_shape()[0].value

    l0_xyz = object_point_cloud
    l0_points = None
    # Set abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=128,
                                                       radius=0.2,
                                                       nsample=64,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='ssg-layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=32,
                                                       radius=0.4,
                                                       nsample=64,
                                                       mlp=[128, 128, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='ssg-layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=None,
                                                       radius=None,
                                                       nsample=None,
                                                       mlp=[256, 256, 512],
                                                       mlp2=None,
                                                       group_all=True,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='ssg-layer3')

    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)

    # The first 3 numbers: box center coordinates (cx, cy, cz);
    # the next NUM_HEADING_BIN*2: heading bin class scores and bin residuals;
    # the next NUM_SIZE_CLUSTER*4: size cluster scores and residuals.
    output = tf_util.fully_connected(net,
                                     3 + NUM_HEADING_BIN * 2 +
                                     NUM_SIZE_CLUSTER * 4,
                                     activation_fn=None,
                                     scope='fc3')
    return output, end_points
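
The comment above fixes the layout of the output vector. A hedged parsing sketch is given below; the split of the NUM_SIZE_CLUSTER*4 block into NUM_SIZE_CLUSTER scores plus NUM_SIZE_CLUSTER*3 (l, w, h) residuals follows the usual Frustum PointNets convention and is an assumption here, since the original parses the vector elsewhere.

# Sketch: split the (B, 3 + NUM_HEADING_BIN*2 + NUM_SIZE_CLUSTER*4) output.
center = tf.slice(output, [0, 0], [-1, 3])                                    # (B, 3)
heading_scores = tf.slice(output, [0, 3], [-1, NUM_HEADING_BIN])              # (B, H)
heading_residuals = tf.slice(output, [0, 3 + NUM_HEADING_BIN],
                             [-1, NUM_HEADING_BIN])                           # (B, H)
size_scores = tf.slice(output, [0, 3 + NUM_HEADING_BIN * 2],
                       [-1, NUM_SIZE_CLUSTER])                                # (B, S)
size_residuals = tf.slice(output, [0, 3 + NUM_HEADING_BIN * 2 + NUM_SIZE_CLUSTER],
                          [-1, NUM_SIZE_CLUSTER * 3])                         # (B, S*3)
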
def get_model(point_cloud, is_training, NUM_CLASSES, normal, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
    if normal:
        l0_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, 3])
    else:
        l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Set abstraction layers
    # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4).
    # So we only use NCHW for layer 1 until this issue can be resolved.
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=512,
                                                       radius=0.2,
                                                       nsample=128,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer1',
                                                       use_nchw=True)
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=128,
                                                       radius=0.4,
                                                       nsample=64,
                                                       mlp=[128, 128, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=None,
                                                       radius=None,
                                                       nsample=None,
                                                       mlp=[256, 512, 1024],
                                                       mlp2=None,
                                                       group_all=True,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer3')

    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net,
                                  NUM_CLASSES,
                                  activation_fn=None,
                                  scope='fc3')

    return net, end_points
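
A hedged usage sketch for the classification variant above; the batch size, point count, and class count are illustrative assumptions, not values taken from the original.

# Usage sketch (TF 1.x graph mode assumed).
import tensorflow as tf

BATCH_SIZE, NUM_POINT, NUM_CLASSES = 16, 1024, 40   # hypothetical sizes

pointclouds_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 6))  # xyz + normals
is_training_pl = tf.placeholder(tf.bool, shape=())

logits, end_points = get_model(pointclouds_pl, is_training_pl,
                               NUM_CLASSES, normal=True)
probs = tf.nn.softmax(logits)               # (B, NUM_CLASSES)
pred_class = tf.argmax(logits, axis=-1)     # (B,)
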
Exemple #20
def get_model(point_cloud,
              is_training,
              num_class,
              num_embed=5,
              sigma=0.05,
              bn_decay=None,
              is_dist=False):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud[:, :, :3]
    l0_points = point_cloud[:, :, 3:]
    end_points['l0_xyz'] = l0_xyz

    # shared encoder
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=1024,
                                                       radius=0.1,
                                                       nsample=32,
                                                       mlp=[32, 32, 64],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       is_dist=is_dist,
                                                       scope='layer1')
    l2_xyz, l2_points = pointconv_encoding(l1_xyz,
                                           l1_points,
                                           npoint=256,
                                           radius=0.2,
                                           sigma=2 * sigma,
                                           K=32,
                                           mlp=[64, 64, 128],
                                           is_training=is_training,
                                           bn_decay=bn_decay,
                                           is_dist=is_dist,
                                           weight_decay=None,
                                           scope='layer2')
    l3_xyz, l3_points = pointconv_encoding(l2_xyz,
                                           l2_points,
                                           npoint=64,
                                           radius=0.4,
                                           sigma=4 * sigma,
                                           K=32,
                                           mlp=[128, 128, 256],
                                           is_training=is_training,
                                           bn_decay=bn_decay,
                                           is_dist=is_dist,
                                           weight_decay=None,
                                           scope='layer3')
    l4_xyz, l4_points = pointconv_encoding(l3_xyz,
                                           l3_points,
                                           npoint=32,
                                           radius=0.8,
                                           sigma=8 * sigma,
                                           K=32,
                                           mlp=[256, 256, 512],
                                           is_training=is_training,
                                           bn_decay=bn_decay,
                                           is_dist=is_dist,
                                           weight_decay=None,
                                           scope='layer4')

    # semantic decoder
    l3_points_sem = pointconv_decoding_depthwise(l3_xyz,
                                                 l4_xyz,
                                                 l3_points,
                                                 l4_points,
                                                 radius=0.8,
                                                 sigma=8 * sigma,
                                                 K=16,
                                                 mlp=[512, 512],
                                                 is_training=is_training,
                                                 bn_decay=bn_decay,
                                                 is_dist=is_dist,
                                                 weight_decay=None,
                                                 scope='sem_fa_layer1')
    l2_points_sem = pointconv_decoding_depthwise(
        l2_xyz,
        l3_xyz,
        l2_points,
        l3_points_sem,
        radius=0.4,
        sigma=4 * sigma,
        K=16,
        mlp=[256, 256],
        is_training=is_training,
        bn_decay=bn_decay,
        is_dist=is_dist,
        weight_decay=None,
        scope='sem_fa_layer2')  # 48x256x256
    l1_points_sem = pointconv_decoding_depthwise(
        l1_xyz,
        l2_xyz,
        l1_points,
        l2_points_sem,
        radius=0.2,
        sigma=2 * sigma,
        K=16,
        mlp=[256, 128],
        is_training=is_training,
        bn_decay=bn_decay,
        is_dist=is_dist,
        weight_decay=None,
        scope='sem_fa_layer3')  # 48x1024x128
    l0_points_sem = pointnet_fp_module(l0_xyz,
                                       l1_xyz,
                                       l0_points,
                                       l1_points_sem, [128, 128, 128],
                                       is_training,
                                       bn_decay,
                                       is_dist=is_dist,
                                       scope='sem_fa_layer4')  # 48x4096x128

    # instance decoder
    l3_points_ins = pointconv_decoding_depthwise(l3_xyz,
                                                 l4_xyz,
                                                 l3_points,
                                                 l4_points,
                                                 radius=0.8,
                                                 sigma=8 * sigma,
                                                 K=16,
                                                 mlp=[512, 512],
                                                 is_training=is_training,
                                                 bn_decay=bn_decay,
                                                 is_dist=is_dist,
                                                 weight_decay=None,
                                                 scope='ins_fa_layer1')
    l2_points_ins = pointconv_decoding_depthwise(
        l2_xyz,
        l3_xyz,
        l2_points,
        l3_points_ins,
        radius=0.4,
        sigma=4 * sigma,
        K=16,
        mlp=[256, 256],
        is_training=is_training,
        bn_decay=bn_decay,
        is_dist=is_dist,
        weight_decay=None,
        scope='ins_fa_layer2')  # 48x256x256
    l1_points_ins = pointconv_decoding_depthwise(
        l1_xyz,
        l2_xyz,
        l1_points,
        l2_points_ins,
        radius=0.2,
        sigma=2 * sigma,
        K=16,
        mlp=[256, 128],
        is_training=is_training,
        bn_decay=bn_decay,
        is_dist=is_dist,
        weight_decay=None,
        scope='ins_fa_layer3')  # 48x1024x128
    l0_points_ins = pointnet_fp_module(l0_xyz,
                                       l1_xyz,
                                       l0_points,
                                       l1_points_ins, [128, 128, 128],
                                       is_training,
                                       bn_decay,
                                       is_dist=is_dist,
                                       scope='ins_fa_layer4')  # 48x4096x128

    # FC layers F_sem
    l2_points_sem_up = pointnet_upsample(l0_xyz,
                                         l2_xyz,
                                         l2_points_sem,
                                         scope='sem_up1')
    l1_points_sem_up = pointnet_upsample(l0_xyz,
                                         l1_xyz,
                                         l1_points_sem,
                                         scope='sem_up2')
    net_sem_0 = tf.add(tf.concat([l0_points_sem, l1_points_sem_up],
                                 axis=-1,
                                 name='sem_up_concat'),
                       l2_points_sem_up,
                       name='sem_up_add')
    net_sem_0 = tf_util.conv1d(net_sem_0,
                               128,
                               1,
                               padding='VALID',
                               bn=True,
                               is_training=is_training,
                               is_dist=is_dist,
                               scope='sem_fc1',
                               bn_decay=bn_decay)

    # FC layers F_ins
    l2_points_ins_up = pointnet_upsample(l0_xyz,
                                         l2_xyz,
                                         l2_points_ins,
                                         scope='ins_up1')
    l1_points_ins_up = pointnet_upsample(l0_xyz,
                                         l1_xyz,
                                         l1_points_ins,
                                         scope='ins_up2')
    net_ins_0 = tf.add(tf.concat([l0_points_ins, l1_points_ins_up],
                                 axis=-1,
                                 name='ins_up_concat'),
                       l2_points_ins_up,
                       name='ins_up_add')
    net_ins_0 = tf_util.conv1d(net_ins_0,
                               128,
                               1,
                               padding='VALID',
                               bn=True,
                               is_training=is_training,
                               is_dist=is_dist,
                               scope='ins_fc1',
                               bn_decay=bn_decay)

    # Adaptation
    net_sem_cache_0 = tf_util.conv1d(net_sem_0,
                                     128,
                                     1,
                                     padding='VALID',
                                     bn=True,
                                     is_training=is_training,
                                     is_dist=is_dist,
                                     scope='sem_cache_1',
                                     bn_decay=bn_decay)
    net_ins_1 = net_ins_0 + net_sem_cache_0

    net_ins_2 = tf.concat([net_ins_0, net_ins_1],
                          axis=-1,
                          name='net_ins_2_concat')
    net_ins_atten = tf.sigmoid(tf.reduce_mean(net_ins_2,
                                              axis=-1,
                                              keep_dims=True,
                                              name='ins_reduce'),
                               name='ins_atten')
    net_ins_3 = net_ins_2 * net_ins_atten

    # Aggregation
    net_ins_cache_0 = tf_util.conv1d(net_ins_3,
                                     128,
                                     1,
                                     padding='VALID',
                                     bn=True,
                                     is_training=is_training,
                                     is_dist=is_dist,
                                     scope='ins_cache_1',
                                     bn_decay=bn_decay)
    net_ins_cache_1 = tf.reduce_mean(net_ins_cache_0,
                                     axis=1,
                                     keep_dims=True,
                                     name='ins_cache_2')
    net_ins_cache_1 = tf.tile(net_ins_cache_1, [1, num_point, 1],
                              name='ins_cache_tile')
    net_sem_1 = net_sem_0 + net_ins_cache_1

    net_sem_2 = tf.concat([net_sem_0, net_sem_1],
                          axis=-1,
                          name='net_sem_2_concat')
    net_sem_atten = tf.sigmoid(tf.reduce_mean(net_sem_2,
                                              axis=-1,
                                              keep_dims=True,
                                              name='sem_reduce'),
                               name='sem_atten')
    net_sem_3 = net_sem_2 * net_sem_atten

    # Output
    net_ins_3 = tf_util.conv1d(net_ins_3,
                               128,
                               1,
                               padding='VALID',
                               bn=True,
                               is_training=is_training,
                               is_dist=is_dist,
                               scope='ins_fc2',
                               bn_decay=bn_decay)
    net_ins_4 = tf_util.dropout(net_ins_3,
                                keep_prob=0.5,
                                is_training=is_training,
                                scope='ins_dp_4')
    net_ins_4 = tf_util.conv1d(net_ins_4,
                               num_embed,
                               1,
                               padding='VALID',
                               activation_fn=None,
                               is_dist=is_dist,
                               scope='ins_fc5')

    net_sem_3 = tf_util.conv1d(net_sem_3,
                               128,
                               1,
                               padding='VALID',
                               bn=True,
                               is_training=is_training,
                               is_dist=is_dist,
                               scope='sem_fc2',
                               bn_decay=bn_decay)
    net_sem_4 = tf_util.dropout(net_sem_3,
                                keep_prob=0.5,
                                is_training=is_training,
                                scope='sem_dp_4')
    net_sem_4 = tf_util.conv1d(net_sem_4,
                               num_class,
                               1,
                               padding='VALID',
                               activation_fn=None,
                               is_dist=is_dist,
                               scope='sem_fc5')

    return net_sem_4, net_ins_4
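
The network above returns per-point semantic logits (net_sem_4, BxNxnum_class) and per-point instance embeddings (net_ins_4, BxNxnum_embed). A hedged consumption sketch follows; the placeholder names are hypothetical, and the instance-clustering step (e.g. mean-shift on the embeddings) happens outside the graph in typical pipelines, so it is only indicated in a comment.

# Sketch: consume the semantic and instance heads.
net_sem, net_ins = get_model(pointclouds_pl, is_training_pl,
                             num_class=NUM_CLASSES, num_embed=5)
sem_labels = tf.argmax(net_sem, axis=-1)   # (B, N) predicted semantic class per point
ins_embeddings = net_ins                   # (B, N, 5) embeddings; cluster at test time
                                           # (e.g. mean-shift) to obtain instance labels
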
Exemple #21
def get_model(point_cloud, is_training, num_class, bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # For each point p = (x, y, z):
    #   generate 3 3D anchors centred at p,
    #   apply the predicted box deltas to each anchor box,
    #   and compute the box deltas between each 3D anchor box and the ground truth.
    num_3d_anchors = cfg.TRAIN.NUM_ANCHORS
    num_regression = cfg.TRAIN.NUM_REGRESSION  # 7 = l, w, h, theta, x, y, z
    # Layer 1
    # [8,1024,3] [8,1024,64] [8,1024,32]
    # Note: the grouping radii radius_l* (on the order of 1 m) are important tuning parameters.
    radius_l1 = cfg.TRAIN.Radius_1
    radius_l2 = cfg.TRAIN.Radius_2
    radius_l3 = cfg.TRAIN.Radius_3
    radius_l4 = cfg.TRAIN.Radius_4
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=8192,
                                                       radius=radius_l1,
                                                       nsample=32,
                                                       mlp=[32, 32, 64],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=2048,
                                                       radius=radius_l2,
                                                       nsample=32,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=1024,
                                                       radius=radius_l3,
                                                       nsample=32,
                                                       mlp=[128, 128, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz,
                                                       l3_points,
                                                       npoint=512,
                                                       radius=radius_l4,
                                                       nsample=32,
                                                       mlp=[256, 256, 512],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer4')

    # Feature Propagation layers
    #l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    #l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')
    #l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')
    #l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')

    # FC layers
    net = tf_util.conv1d(l4_points,
                         512,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='fc1',
                         bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net_class = tf_util.conv1d(
        net,
        num_3d_anchors * num_class,
        1,
        padding='VALID',
        activation_fn=None,
        scope='fc2')  # outputs the per-anchor classification scores for every point
    net_boxes = tf_util.conv1d(net,
                               num_3d_anchors * num_regression,
                               1,
                               padding='VALID',
                               activation_fn=None,
                               scope='fc3')  # outputs the per-anchor 3D bounding-box regressions

    return end_points, net_class, net_boxes, l4_xyz
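
The two heads above pack the per-anchor predictions into the channel dimension. A hedged reshaping sketch makes the anchor axis explicit; the constants below (batch size, anchor count, class count) are assumptions for illustration, with NUM_ANCHORS = 3 taken from the comment near the top of the function and NUM_REGRESSION = 7 following the l, w, h, theta, x, y, z ordering noted there.

# Sketch: expose the anchor dimension of the detector outputs returned above.
# net_class: (B, 512, num_anchors * num_class); net_boxes: (B, 512, num_anchors * 7).
BATCH_SIZE = 8          # hypothetical batch size
NPOINT_L4 = 512         # npoint used for layer4 above
NUM_ANCHORS = 3         # per the anchor comment above (assumed config value)
NUM_REGRESSION = 7      # l, w, h, theta, x, y, z
NUM_CLASS = 2           # hypothetical class count

cls_per_anchor = tf.reshape(net_class, [BATCH_SIZE, NPOINT_L4, NUM_ANCHORS, NUM_CLASS])
box_per_anchor = tf.reshape(net_boxes, [BATCH_SIZE, NPOINT_L4, NUM_ANCHORS, NUM_REGRESSION])
cls_probs = tf.nn.softmax(cls_per_anchor)   # per-anchor class probabilities per point
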
Exemple #22
def get_model(point_cloud, is_training, bn_decay=None):
    """ Part segmentation PointNet, input is BxNx6 (XYZ NormalX NormalY NormalZ), output Bx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])  ## [B,N,3] xyz
    l0_points = tf.slice(point_cloud, [0, 0, 3],
                         [-1, -1, 3])  ## [B,N,3] normals

    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=512,
                                                       radius=0.2,
                                                       nsample=64,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer1')
    ## [B,512,128]
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=128,
                                                       radius=0.4,
                                                       nsample=64,
                                                       mlp=[128, 128, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer2')
    ## [B,128,256]
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=None,
                                                       radius=None,
                                                       nsample=None,
                                                       mlp=[256, 512, 1024],
                                                       mlp2=None,
                                                       group_all=True,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer3')
    ## [B,1,1024]

    # Feature Propagation layers
    l2_points = pointnet_fp_module(l2_xyz,
                                   l3_xyz,
                                   l2_points,
                                   l3_points, [256, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer1')
    ## [B,128,256]
    l1_points = pointnet_fp_module(l1_xyz,
                                   l2_xyz,
                                   l1_points,
                                   l2_points, [256, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer2')
    ## [B,512,128]
    l0_points = pointnet_fp_module(l0_xyz,
                                   l1_xyz,
                                   tf.concat([l0_xyz, l0_points], axis=-1),
                                   l1_points, [128, 128, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer3')
    ## [B,N,128]

    ## weight sum module
    we_points1, idx = weight_layer_KNN(l0_xyz,
                                       l0_points,
                                       128,
                                       is_training=is_training,
                                       scope='we1')
    #we_points2, _ = weight_layer(l0_xyz, we_points1, 64, is_training=is_training, scope='we2')

    # FC layers
    #net = tf_util.conv1d(l0_points, 64, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    #end_points['feats'] = net
    net = tf_util.dropout(we_points1,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    #net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points
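
As the commented-out line above indicates, this variant currently returns the dropout output (B x N x 128 features) rather than 50-way part logits. A hedged sketch restoring such a head, mirroring the disabled tf_util.conv1d call, is shown below; the placeholder names are hypothetical and the head is not part of the original, which deliberately leaves it out.

# Sketch: re-attach the 50-way part-segmentation head that is commented out above.
net, end_points = get_model(pointclouds_pl, is_training_pl)
part_logits = tf_util.conv1d(net, 50, 1, padding='VALID',
                             activation_fn=None, scope='fc2')   # B x N x 50
part_labels = tf.argmax(part_logits, axis=-1)                   # B x N
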
Exemple #23
def get_model(point_cloud, is_training, num_class, bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud[:, :, :3]
    l0_points = point_cloud[:, :, 3:]
    end_points['l0_xyz'] = l0_xyz

    # Layer 1
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=1024,
                                                       radius=0.1,
                                                       nsample=32,
                                                       mlp=[32, 32, 64],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=256,
                                                       radius=0.2,
                                                       nsample=32,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=64,
                                                       radius=0.4,
                                                       nsample=32,
                                                       mlp=[128, 128, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz,
                                                       l3_points,
                                                       npoint=16,
                                                       radius=0.8,
                                                       nsample=32,
                                                       mlp=[256, 256, 512],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer4')

    # Feature Propagation layers
    l3_points_sem = pointnet_fp_module(l3_xyz,
                                       l4_xyz,
                                       l3_points,
                                       l4_points, [256, 256],
                                       is_training,
                                       bn_decay,
                                       scope='sem_fa_layer1')
    l2_points_sem = pointnet_fp_module(l2_xyz,
                                       l3_xyz,
                                       l2_points,
                                       l3_points_sem, [256, 256],
                                       is_training,
                                       bn_decay,
                                       scope='sem_fa_layer2')
    l1_points_sem = pointnet_fp_module(l1_xyz,
                                       l2_xyz,
                                       l1_points,
                                       l2_points_sem, [256, 128],
                                       is_training,
                                       bn_decay,
                                       scope='sem_fa_layer3')
    l0_points_sem = pointnet_fp_module(l0_xyz,
                                       l1_xyz,
                                       l0_points,
                                       l1_points_sem, [128, 128, 128],
                                       is_training,
                                       bn_decay,
                                       scope='sem_fa_layer4')

    # FC layers
    net_sem = tf_util.conv1d(l0_points_sem,
                             128,
                             1,
                             padding='VALID',
                             bn=True,
                             is_training=is_training,
                             scope='sem_fc1',
                             bn_decay=bn_decay)
    net_sem_cache = tf_util.conv1d(net_sem,
                                   128,
                                   1,
                                   padding='VALID',
                                   bn=True,
                                   is_training=is_training,
                                   scope='sem_cache',
                                   bn_decay=bn_decay)

    # ins
    l3_points_ins = pointnet_fp_module(l3_xyz,
                                       l4_xyz,
                                       l3_points,
                                       l4_points, [256, 256],
                                       is_training,
                                       bn_decay,
                                       scope='ins_fa_layer1')
    l2_points_ins = pointnet_fp_module(l2_xyz,
                                       l3_xyz,
                                       l2_points,
                                       l3_points_ins, [256, 256],
                                       is_training,
                                       bn_decay,
                                       scope='ins_fa_layer2')
    l1_points_ins = pointnet_fp_module(l1_xyz,
                                       l2_xyz,
                                       l1_points,
                                       l2_points_ins, [256, 128],
                                       is_training,
                                       bn_decay,
                                       scope='ins_fa_layer3')
    l0_points_ins = pointnet_fp_module(l0_xyz,
                                       l1_xyz,
                                       l0_points,
                                       l1_points_ins, [128, 128, 128],
                                       is_training,
                                       bn_decay,
                                       scope='ins_fa_layer4')

    net_ins = tf_util.conv1d(l0_points_ins,
                             128,
                             1,
                             padding='VALID',
                             bn=True,
                             is_training=is_training,
                             scope='ins_fc1',
                             bn_decay=bn_decay)

    net_ins = net_ins + net_sem_cache
    net_ins = tf_util.dropout(net_ins,
                              keep_prob=0.5,
                              is_training=is_training,
                              scope='ins_dp1')
    net_ins = tf_util.conv1d(net_ins,
                             5,
                             1,
                             padding='VALID',
                             activation_fn=None,
                             scope='ins_fc4')

    k = 30
    adj_matrix = tf_util.pairwise_distance_l1(net_ins)
    nn_idx = tf_util.knn_thres(adj_matrix, k=k)
    nn_idx = tf.stop_gradient(nn_idx)

    net_sem = tf_util.get_local_feature(net_sem, nn_idx=nn_idx,
                                        k=k)  # [b, n, k, c]
    net_sem = tf.reduce_max(net_sem, axis=-2, keep_dims=False)

    net_sem = tf_util.dropout(net_sem,
                              keep_prob=0.5,
                              is_training=is_training,
                              scope='sem_dp1')
    net_sem = tf_util.conv1d(net_sem,
                             num_class,
                             1,
                             padding='VALID',
                             activation_fn=None,
                             scope='sem_fc4')

    return net_sem, net_ins
Exemple #24
    def build(self):
        point_cloud = self.placeholders['pointclouds']
        is_training = self.placeholders['is_training_pl']
        batch_size = self.batch_size
        # image
        '''
        seg_softmax = self.placeholders['img_seg_map']
        seg_pred = tf.expand_dims(tf.argmax(seg_softmax, axis=-1), axis=-1)
        self._img_pixel_size = np.asarray([360, 1200])
        box2d_corners, box2d_corners_norm = projection.tf_project_to_image_space(
            self.placeholders['proposal_boxes'],
            self.placeholders['calib'], self._img_pixel_size)
        # y1, x1, y2, x2
        box2d_corners_norm_reorder = tf.stack([
            tf.gather(box2d_corners_norm, 1, axis=-1),
            tf.gather(box2d_corners_norm, 0, axis=-1),
            tf.gather(box2d_corners_norm, 3, axis=-1),
            tf.gather(box2d_corners_norm, 2, axis=-1),
        ], axis=-1)
        img_rois = tf.image.crop_and_resize(
            seg_softmax,
            #seg_pred,
            box2d_corners_norm_reorder,
            tf.range(0, batch_size),
            [16,16])
        self.end_points['img_rois'] = img_rois
        self.end_points['box2d_corners_norm_reorder'] = box2d_corners_norm_reorder
        '''

        l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
        if self.num_channel > 3:
            l0_points = tf.slice(point_cloud, [0, 0, 3],
                                 [-1, -1, self.num_channel - 3])
        else:
            l0_points = None
        # Set abstraction layers
        l1_xyz, l1_points, _ = pointnet_sa_module(l0_xyz,
                                                  l0_points,
                                                  npoint=128,
                                                  radius=0.2,
                                                  nsample=64,
                                                  mlp=[128, 128, 128],
                                                  mlp2=None,
                                                  group_all=False,
                                                  is_training=is_training,
                                                  bn_decay=self.bn_decay,
                                                  scope='rcnn-sa1',
                                                  bn=True)
        l2_xyz, l2_points, _ = pointnet_sa_module(l1_xyz,
                                                  l1_points,
                                                  npoint=32,
                                                  radius=0.4,
                                                  nsample=64,
                                                  mlp=[128, 128, 256],
                                                  mlp2=None,
                                                  group_all=False,
                                                  is_training=is_training,
                                                  bn_decay=self.bn_decay,
                                                  scope='rcnn-sa2',
                                                  bn=True)
        l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz,
                                                  l2_points,
                                                  npoint=-1,
                                                  radius=100,
                                                  nsample=64,
                                                  mlp=[256, 256, 512],
                                                  mlp2=None,
                                                  group_all=True,
                                                  is_training=is_training,
                                                  bn_decay=self.bn_decay,
                                                  scope='rcnn-sa3',
                                                  bn=True)

        point_feats = l3_points

        # Classification
        cls_net = tf_util.conv1d(point_feats,
                                 256,
                                 1,
                                 padding='VALID',
                                 bn=True,
                                 is_training=is_training,
                                 scope='rcnn-cls-fc1',
                                 bn_decay=self.bn_decay)
        # cls_net = tf_util.dropout(cls_net, keep_prob=0.5,
        #     is_training=is_training, scope='rcnn-cls-dp')
        cls_net = tf_util.conv1d(cls_net,
                                 256,
                                 1,
                                 padding='VALID',
                                 bn=True,
                                 is_training=is_training,
                                 scope='rcnn-cls-fc2',
                                 bn_decay=self.bn_decay)
        cls_logits = tf_util.conv1d(cls_net,
                                    NUM_OBJ_CLASSES,
                                    1,
                                    padding='VALID',
                                    activation_fn=None,
                                    scope='conv1d-fc2')  # (B, 1, NUM_OBJ_CLASSES)
        cls_out = tf.squeeze(cls_logits, axis=1)
        self.end_points['cls_logits'] = cls_out

        # Box estimation
        # Predict the object class from the classification logits, not from the 256-d feature map.
        cls_label_pred = tf.argmax(tf.nn.softmax(cls_logits), axis=-1)
        one_hot_pred = tf.one_hot(cls_label_pred, NUM_OBJ_CLASSES,
                                  axis=-1)  # (B, 1, NUM_OBJ_CLASSES)
        one_hot_gt = tf.one_hot(self.placeholders['class_labels'],
                                NUM_OBJ_CLASSES,
                                axis=-1)  # (B, NUM_OBJ_CLASSES)
        one_hot_gt = tf.expand_dims(one_hot_gt, axis=1)
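        # Use the ground-truth class one-hot during training and the predicted one at inference.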
        one_hot_vec = tf.cond(is_training, lambda: one_hot_gt,
                              lambda: one_hot_pred)
        one_hot_vec.set_shape([batch_size, 1, NUM_OBJ_CLASSES])
        est_input = tf.concat([point_feats, one_hot_vec], axis=-1)
        box_net = tf_util.conv1d(est_input,
                                 256,
                                 1,
                                 padding='VALID',
                                 bn=True,
                                 is_training=is_training,
                                 scope='rcnn-box-fc1',
                                 bn_decay=self.bn_decay)
        # box_net = tf_util.dropout(box_net, keep_prob=0.5,
        #     is_training=is_training, scope='rcnn-box-dp')
        box_net = tf_util.conv1d(box_net,
                                 256,
                                 1,
                                 padding='VALID',
                                 bn=True,
                                 is_training=is_training,
                                 scope='rcnn-box-fc2',
                                 bn_decay=self.bn_decay)
        # The first NUM_CENTER_BIN*2*2: CENTER_BIN class scores and bin residuals for (x,z)
        # next 1: center residual for y
        # next NUM_HEADING_BIN*2: heading bin class scores and residuals
        # next NUM_SIZE_CLUSTER*4: size cluster class scores and residuals(l,w,h)
        box_out = tf_util.conv1d(box_net,
                                 NUM_CENTER_BIN * 2 * 2 + 1 +
                                 NUM_HEADING_BIN * 2 + NUM_SIZE_CLUSTER * 4,
                                 1,
                                 padding='VALID',
                                 activation_fn=None,
                                 scope='rcnn-box-out')
        box_out = tf.squeeze(box_out, axis=1)
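        # Hypothetical sketch of how the layout above could be sliced; the exact ordering
        # inside each block is an assumption -- the real parsing is done by
        # self.parse_output_to_tensors below.
        #   c = NUM_CENTER_BIN
        #   x_scores, x_res = box_out[:, :c],       box_out[:, c:2*c]
        #   z_scores, z_res = box_out[:, 2*c:3*c],  box_out[:, 3*c:4*c]
        #   y_res           = box_out[:, 4*c:4*c+1]
        #   h = 4*c + 1
        #   heading_scores  = box_out[:, h:h+NUM_HEADING_BIN]
        #   heading_res     = box_out[:, h+NUM_HEADING_BIN:h+2*NUM_HEADING_BIN]
        #   s = h + 2*NUM_HEADING_BIN
        #   size_scores     = box_out[:, s:s+NUM_SIZE_CLUSTER]
        #   size_res        = box_out[:, s+NUM_SIZE_CLUSTER:]  # reshaped to (B, NUM_SIZE_CLUSTER, 3)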
        self.parse_output_to_tensors(box_out)
        self.get_output_boxes()
def get_model(point_cloud, cls_label, pp_idx, is_training, bn_decay=None):
    """ Part segmentation PointNet, input is BxNx6 (XYZ NormalX NormalY NormalZ), output Bx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    cls_label_one_hot = tf.one_hot(cls_label,
                                   depth=NUM_CATEGORIES,
                                   on_value=1.0,
                                   off_value=0.0)
    cls_label_one_hot = tf.reshape(cls_label_one_hot,
                                   [batch_size, 1, NUM_CATEGORIES])
    cls_label_one_hot = tf.tile(cls_label_one_hot, [1, num_point, 1])
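    # The tiled one-hot above attaches the shape-category label to every point so it
    # can be concatenated with per-point features in the propagation layers below.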

    l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
    orig_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, 3])

    # Set Abstraction layers
    l1_xyz, l1_points, _ = pointnet_sa_module(l0_xyz,
                                              orig_points,
                                              npoint=512,
                                              radius=0.2,
                                              nsample=64,
                                              mlp=[64, 64, 128],
                                              mlp2=None,
                                              group_all=False,
                                              is_training=is_training,
                                              bn_decay=bn_decay,
                                              scope='layer1')
    l2_xyz, l2_points, _ = pointnet_sa_module(l1_xyz,
                                              l1_points,
                                              npoint=128,
                                              radius=0.4,
                                              nsample=64,
                                              mlp=[128, 128, 256],
                                              mlp2=None,
                                              group_all=False,
                                              is_training=is_training,
                                              bn_decay=bn_decay,
                                              scope='layer2')

    # Downsample to a single global feature point
    l3_xyz, l32_points, _ = pointnet_sa_module(l2_xyz,
                                               l2_points,
                                               npoint=None,
                                               radius=None,
                                               nsample=None,
                                               mlp=[256, 512, 1024],
                                               mlp2=None,
                                               group_all=True,
                                               is_training=is_training,
                                               bn_decay=bn_decay,
                                               scope='layer32')
    _, l31_points, _ = pointnet_sa_module(l1_xyz,
                                          l1_points,
                                          npoint=None,
                                          radius=None,
                                          nsample=None,
                                          mlp=[128, 256, 512, 1024],
                                          mlp2=None,
                                          group_all=True,
                                          is_training=is_training,
                                          bn_decay=bn_decay,
                                          scope='layer31')
    _, l30_points, _ = pointnet_sa_module(l0_xyz,
                                          orig_points,
                                          npoint=None,
                                          radius=None,
                                          nsample=None,
                                          mlp=[64, 128, 256, 512, 1024],
                                          mlp2=None,
                                          group_all=True,
                                          is_training=is_training,
                                          bn_decay=bn_decay,
                                          scope='layer30')

    l3_points = l32_points + l31_points + l30_points
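    # (sum of three 1024-d global descriptors computed at the l2, l1 and l0 resolutions)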
    #l3_points = tf_util.dropout(l3_points, keep_prob=0.5, is_training=is_training, scope='dp1')

    # Feature propagation layers (feature propagation after each SA layer)
    fp2_points = pointnet_fp_module(
        l0_xyz,
        l3_xyz,
        tf.concat([cls_label_one_hot, l0_xyz, orig_points], axis=-1),
        l3_points, [256, 128],
        is_training,
        bn_decay,
        scope='dfp_layer1')
    fp1_points = pointnet_fp_module(
        l0_xyz,
        l2_xyz,
        tf.concat([cls_label_one_hot, l0_xyz, orig_points], axis=-1),
        l2_points, [256, 128],
        is_training,
        bn_decay,
        scope='dfp_layer2')
    fp0_points = pointnet_fp_module(
        l0_xyz,
        l1_xyz,
        tf.concat([cls_label_one_hot, l0_xyz, orig_points], axis=-1),
        l1_points, [128, 128],
        is_training,
        bn_decay,
        scope='dfp_layer3')

    # Feature Propagation layers
    l2_points = pointnet_fp_module(l2_xyz,
                                   l3_xyz,
                                   l2_points,
                                   l3_points, [256, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fp_layer1')
    l1_points = pointnet_fp_module(l1_xyz,
                                   l2_xyz,
                                   l1_points,
                                   l2_points, [256, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fp_layer2')
    l0_points = pointnet_fp_module(
        l0_xyz,
        l1_xyz,
        tf.concat([cls_label_one_hot, l0_xyz, orig_points], axis=-1),
        l1_points, [128, 128, 128],
        is_training,
        bn_decay,
        scope='fp_layer3')

    # Residual propagation
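    # Interpolate l2 and l1 features straight back to the input points and add them
    # to l0_points as extra residual terms in the sum below.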
    rfp2_points = pointnet_fp_module(
        l0_xyz,
        l2_xyz,
        tf.concat([cls_label_one_hot, l0_xyz, orig_points], axis=-1),
        l2_points, [256, 128],
        is_training,
        bn_decay,
        scope='rfp_layer1')
    rfp1_points = pointnet_fp_module(
        l0_xyz,
        l1_xyz,
        tf.concat([cls_label_one_hot, l0_xyz, orig_points], axis=-1),
        l1_points, [128, 128],
        is_training,
        bn_decay,
        scope='rfp_layer2')
    rfp_points = rfp2_points + rfp1_points  # residual upsampling

    #l0_points = tf.concat([fp2_points, fp1_points, fp0_points, l0_points], axis=-1) # concat
    l0_points = fp2_points + fp1_points + fp0_points + l0_points + rfp_points  # v2(residuals)
    #l0_points = fp2_points + fp1_points + fp0_points + l0_points # sum

    # FC layers
    net = tf_util.conv1d(l0_points,
                         128,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='fc1',
                         bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.conv1d(net,
                         128,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='fc2',
                         bn_decay=bn_decay)
    end_points['feats'] = net

    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')

    pred = tf_util.conv1d(net,
                          50,
                          1,
                          padding='VALID',
                          activation_fn=None,
                          scope='fc3')

    if pp_idx is not None:
        pp_pred = get_pp_pred(end_points['feats'], pp_idx)
    else:
        pp_pred = None

    return pred, pp_pred, end_points
def get_model(point_cloud, is_training, bn_decay=None):
    """ Part segmentation PointNet, input is BxNx6 (XYZ NormalX NormalY NormalZ), output Bx50 """

    ############# PointNet++ segmentation part: first sample, then group, and hierarchically extract point-cloud features ######################

    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
    l0_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, 3])

    # Set abstraction layers: hierarchical feature extraction (with the SSG strategy, single-scale grouping)
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=512,
                                                       radius=0.2,
                                                       nsample=64,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=128,
                                                       radius=0.4,
                                                       nsample=64,
                                                       mlp=[128, 128, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=None,
                                                       radius=None,
                                                       nsample=None,
                                                       mlp=[256, 512, 1024],
                                                       mlp2=None,
                                                       group_all=True,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer3')

    # Feature Propagation layers: interpolate features back to the original number of points, then classify each point
    l2_points = pointnet_fp_module(l2_xyz,
                                   l3_xyz,
                                   l2_points,
                                   l3_points, [256, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz,
                                   l2_xyz,
                                   l1_points,
                                   l2_points, [256, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz,
                                   l1_xyz,
                                   tf.concat([l0_xyz, l0_points], axis=-1),
                                   l1_points, [128, 128, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points,
                         128,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='fc1',
                         bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.conv1d(net,
                         50,
                         1,
                         padding='VALID',
                         activation_fn=None,
                         scope='fc2')  # output: (B, N, 50)

    return net, end_points
Exemple #27
0
def get_model(point_cloud, is_training, bn_decay=None):
    """ Part segmentation PointNet, input is BxNx6 (XYZ NormalX NormalY NormalZ), output Bx50 """
    num_class = 5

    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
    l0_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, 1])
    #l0_points = None
    #end_points['l0_xyz'] = l0_xyz

    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=512,
                                                       radius=0.02,
                                                       nsample=32,
                                                       mlp=[32, 32, 64],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer1',
                                                       bn=False)
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=256,
                                                       radius=0.05,
                                                       nsample=64,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer2',
                                                       bn=False)
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=None,
                                                       radius=None,
                                                       nsample=None,
                                                       mlp=[128, 256, 512],
                                                       mlp2=None,
                                                       group_all=True,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer3',
                                                       bn=False)

    # Feature Propagation layers
    l2_points = pointnet_fp_module(l2_xyz,
                                   l3_xyz,
                                   l2_points,
                                   l3_points, [128, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer1',
                                   bn=False)
    l1_points = pointnet_fp_module(l1_xyz,
                                   l2_xyz,
                                   l1_points,
                                   l2_points, [128, 64],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer2',
                                   bn=False)
    l0_points = pointnet_fp_module(l0_xyz,
                                   l1_xyz,
                                   tf.concat([l0_xyz, l0_points], axis=-1),
                                   l1_points, [64, 64, 64],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer3',
                                   bn=False)
    #l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points,
                         128,
                         1,
                         padding='VALID',
                         bn=False,
                         is_training=is_training,
                         scope='fc1',
                         bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.conv1d(net,
                         num_class,
                         1,
                         padding='VALID',
                         activation_fn=None,
                         scope='fc2')

    reg_net = tf.reshape(l3_points, [batch_size, -1])
    reg_net = tf_util.fully_connected(reg_net,
                                      256,
                                      bn=False,
                                      is_training=is_training,
                                      scope='refc1',
                                      bn_decay=bn_decay)
    reg_net = tf_util.fully_connected(reg_net,
                                      128,
                                      bn=False,
                                      is_training=is_training,
                                      scope='refc2',
                                      bn_decay=bn_decay)
    reg_net = tf_util.fully_connected(reg_net,
                                      21,
                                      activation_fn=None,
                                      scope='refc3')
    reg_net = tf.reshape(reg_net, [batch_size, 3, 7])
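    # 21 regressed values per example, reshaped into 3 groups of 7 parameters.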

    return net, reg_net, end_points
Exemple #28
0
def get_model(point_cloud, is_training, num_class, bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Layer 1
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=1024,
                                                       radius=0.1,
                                                       nsample=32,
                                                       mlp=[32, 32, 64],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=256,
                                                       radius=0.2,
                                                       nsample=32,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=64,
                                                       radius=0.4,
                                                       nsample=32,
                                                       mlp=[128, 128, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz,
                                                       l3_points,
                                                       npoint=16,
                                                       radius=0.8,
                                                       nsample=32,
                                                       mlp=[256, 256, 512],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer4')

    # Feature Propagation layers
    l3_points = pointnet_fp_module(l3_xyz,
                                   l4_xyz,
                                   l3_points,
                                   l4_points, [256, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz,
                                   l3_xyz,
                                   l2_points,
                                   l3_points, [256, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz,
                                   l2_xyz,
                                   l1_points,
                                   l2_points, [256, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz,
                                   l1_xyz,
                                   l0_points,
                                   l1_points, [128, 128, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer4')

    # FC layers
    net = tf_util.conv1d(l0_points,
                         128,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='fc1',
                         bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.conv1d(net,
                         num_class,
                         1,
                         padding='VALID',
                         activation_fn=None,
                         scope='fc2')

    return net, end_points
def get_model_seg_cnn(point_cloud, img_cnn, is_training, bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Layer 1 Encoder
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=10000,
                                                       radius=0.1,
                                                       nsample=32,
                                                       mlp=[32, 32, 64],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=5000,
                                                       radius=0.2,
                                                       nsample=32,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=4000,
                                                       radius=0.4,
                                                       nsample=32,
                                                       mlp=[128, 128, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz,
                                                       l3_points,
                                                       npoint=1000,
                                                       radius=0.8,
                                                       nsample=32,
                                                       mlp=[256, 256, 512],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer4')

    # Feature Propagation layers
    l3_points = pointnet_fp_module(l3_xyz,
                                   l4_xyz,
                                   l3_points,
                                   l4_points, [256, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz,
                                   l3_xyz,
                                   l2_points,
                                   l3_points, [256, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz,
                                   l2_xyz,
                                   l1_points,
                                   l2_points, [256, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz,
                                   l1_xyz,
                                   l0_points,
                                   l1_points, [128, 128, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer4')  #1 x 10000 x 128

    # CNN encoder layers for edge detection on the input image
    conv1 = tf.layers.conv2d(inputs=img_cnn,
                             filters=32,
                             kernel_size=[5, 5],
                             padding="same",
                             activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    conv2 = tf.layers.conv2d(inputs=pool1,
                             filters=64,
                             kernel_size=[5, 5],
                             padding="same",
                             activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # Decoder
    conv3 = tf.layers.conv2d(pool2,
                             filters=32,
                             kernel_size=(3, 3),
                             strides=(1, 1),
                             name='conv3',
                             padding='SAME',
                             use_bias=True,
                             activation=tf.nn.relu)
    upsample1 = tf.layers.conv2d_transpose(conv3,
                                           filters=64,
                                           kernel_size=3,
                                           padding='same',
                                           strides=2,
                                           name='upsample1')
    upsample2 = tf.layers.conv2d_transpose(upsample1,
                                           filters=64,
                                           kernel_size=3,
                                           padding='same',
                                           strides=2,
                                           name='upsample2')
    conv4 = tf.layers.conv2d(upsample2,
                             filters=128,
                             kernel_size=(3, 3),
                             strides=(1, 1),
                             name='upsample3',
                             padding='SAME',
                             use_bias=True)
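    # Flatten the decoder feature map to one 128-d image feature per point; this
    # assumes conv4 holds exactly batch_size * num_point * 128 elements.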
    net_class = tf.reshape(conv4,
                           [batch_size, num_point, 128])  #1 x 10000 x 128

    # Concatenate per-point geometric features with the per-point image features
    net = tf.concat([l0_points, net_class], axis=-1)

    # FC layers
    net = tf_util.conv1d(net,
                         128,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='fc1',
                         bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.conv1d(net,
                         12,
                         1,
                         padding='VALID',
                         activation_fn=None,
                         scope='fc2')
    return net, end_points
Exemple #30
0
def trans_pred_net(xyz,
                   flow,
                   scopename,
                   reuse,
                   is_training,
                   bn_decay=None,
                   nfea=12):
    #########################
    # input
    #   xyz: (B x N x 3)
    #   flow: (B x N x 3)
    # output
    #   pred_trans: (B x N x nfea)
    #########################
    num_point = xyz.get_shape()[1].value
    with tf.variable_scope(scopename) as myscope:
        if reuse:
            myscope.reuse_variables()
        l0_xyz = xyz
        l0_points = flow
        # Set Abstraction layers
        l1_xyz, l1_points, l1_indices = pointnet_sa_module_msg(
            l0_xyz,
            l0_points,
            256, [0.1, 0.2], [64, 64], [[64, 64], [64, 64], [64, 128]],
            is_training,
            bn_decay,
            scope='trans_layer1',
            centralize_points=True)
        l2_xyz, l2_points, l2_indices = pointnet_sa_module(
            l1_xyz,
            l1_points,
            npoint=128,
            radius=0.4,
            nsample=64,
            mlp=[128, 128, 256],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='trans_layer2')
        l3_xyz, l3_points, l3_indices = pointnet_sa_module(
            l2_xyz,
            l2_points,
            npoint=None,
            radius=None,
            nsample=None,
            mlp=[256, 512, 1024],
            mlp2=None,
            group_all=True,
            use_xyz=True,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='trans_layer3')
        # Feature Propagation layers
        l2_points = pointnet_fp_module(l2_xyz,
                                       l3_xyz,
                                       l2_points,
                                       l3_points, [256, 256],
                                       is_training,
                                       bn_decay,
                                       scope='trans_fa_layer1')
        l1_points = pointnet_fp_module(l1_xyz,
                                       l2_xyz,
                                       l1_points,
                                       l2_points, [256, 128],
                                       is_training,
                                       bn_decay,
                                       scope='trans_fa_layer2')
        l0_points = pointnet_fp_module(l0_xyz,
                                       l1_xyz,
                                       l0_points,
                                       l1_points, [128, 128, 64],
                                       is_training,
                                       bn_decay,
                                       scope='trans_fa_layer3')
        # FC layers
        net = tf_util.conv1d(l0_points,
                             64,
                             1,
                             padding='VALID',
                             bn=True,
                             is_training=is_training,
                             scope='trans_fc1',
                             bn_decay=bn_decay)
        net = tf_util.conv1d(net,
                             nfea,
                             1,
                             padding='VALID',
                             activation_fn=None,
                             scope='trans_fc2')
        pred_trans = tf.reshape(net, [-1, num_point, nfea])
    return pred_trans
Exemple #31
0
    def get_model_w_ae_p(self, point_cloud, is_training, bn_decay=None):
        """" Classification PointNet, input is BxNx3, output Bx40 """
        pointnet_util = imp.load_source(
            'pointnet_util',
            os.path.join(os.path.dirname(self.models["test"]), '../utils',
                         "pointnet_util.py"))
        tf_util = imp.load_source(
            'tf_util',
            os.path.join(os.path.dirname(self.models["test"]), '../utils',
                         "tf_util.py"))
        from pointnet_util import pointnet_sa_module, pointnet_sa_module_msg
        batch_size = self.configuration.batch_size
        num_point = self.configuration.n_input[0]
        end_points = {}
        l0_xyz = point_cloud
        l0_points = None

        # Set abstraction layers
        l1_xyz, l1_points = pointnet_sa_module_msg(
            l0_xyz,
            l0_points,
            512, [0.1, 0.2, 0.4], [16, 32, 128],
            [[32, 32, 64], [64, 64, 128], [64, 96, 128]],
            is_training,
            bn_decay,
            scope='layer1',
            use_nchw=True)
        l2_xyz, l2_points = pointnet_sa_module_msg(
            l1_xyz,
            l1_points,
            128, [0.2, 0.4, 0.8], [32, 64, 128],
            [[64, 64, 128], [128, 128, 256], [128, 128, 256]],
            is_training,
            bn_decay,
            scope='layer2')
        l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz,
                                                  l2_points,
                                                  npoint=None,
                                                  radius=None,
                                                  nsample=None,
                                                  mlp=[256, 512, 1024],
                                                  mlp2=None,
                                                  group_all=True,
                                                  is_training=is_training,
                                                  bn_decay=bn_decay,
                                                  scope='layer3')

        # Fully connected layers
        net = tf.reshape(l3_points, [batch_size, -1])
        end_points['post_max'] = net
        net = tf_util.fully_connected(net,
                                      512,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc1',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.4,
                              is_training=is_training,
                              scope='dp1')
        net = tf_util.fully_connected(net,
                                      256,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc2',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.4,
                              is_training=is_training,
                              scope='dp2')
        end_points['final'] = net
        net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

        return net, end_points
def getEncoder_oneBranch_local(partPoints_input,
                               dis2Joint,
                               NumOfPts,
                               is_training,
                               reuse,
                               scope_predix="part_1",
                               verbose=True,
                               bn_decay=None):

    scname = scope_predix + "_pointNetPP_encoder"
    with tf.variable_scope(scname) as sc:
        if reuse:
            sc.reuse_variables()

        is_training = tf.constant(is_training, dtype=tf.bool)
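        # Wrap the Python bool in a TF constant so it can be fed to the PointNet++ layers.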

        l0_xyz = partPoints_input
        l0_points = None

        print("partPoints_input.shape = ", partPoints_input.shape)
        if partPoints_input.shape[2] == 6:
            l0_xyz = partPoints_input[:, :, 0:3]
            l0_points = partPoints_input[:, :, 3:6]

        # assume self.shapeBatchSize==1
        inputDims = tf.reduce_max(l0_xyz, axis=[0, 1]) - tf.reduce_min(
            l0_xyz, axis=[0, 1])
        print("inputDims.shape = ", inputDims.shape)
        does_part_exist = tf.dtypes.cast(
            tf.reduce_mean(tf.abs(inputDims)) > 0.01, tf.float32)
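        # does_part_exist is 0 when the part's bounding-box extent is (near) zero,
        # which zeroes out the returned encoding for missing parts.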

        # Set Abstraction layers
        l1_xyz, l1_points, l1_indices = pointnet_sa_module(
            l0_xyz,
            l0_points,
            npoint=256,
            radius=0.05,
            nsample=128,
            mlp=[32, 32, 64],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='layer1',
            bn=False)

        l2_xyz, l2_points, l2_indices = pointnet_sa_module(
            l1_xyz,
            l1_points,
            npoint=128,
            radius=0.1,
            nsample=128,
            mlp=[64, 64, 128],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='layer2',
            bn=False)

        l4_xyz, l4_points, l4_indices = pointnet_sa_module(
            l2_xyz,
            l2_points,
            npoint=None,
            radius=None,
            nsample=None,
            mlp=[128, 128, 128],
            mlp2=None,
            group_all=True,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='layerxxx',
            bn=False)

        output_4 = tf.reshape(l4_points, [l0_xyz.shape[0], 128])

        print('output_4.shape = ', output_4.shape)

        return output_4 * does_part_exist
Exemple #33
0
    def __init__(self, points, features, num_class, is_training, setting):
        bn_decay = setting.get_bn_decay(tf.train.get_global_step())
        l0_xyz = points
        l0_points = None

        # Set abstraction layers
        l1_xyz, l1_points = pointnet_sa_module_msg(
            l0_xyz,
            l0_points,
            512, [0.1, 0.2, 0.4], [32, 64, 128],
            [[32, 32, 64], [64, 64, 128], [64, 96, 128]],
            is_training,
            bn_decay,
            scope='layer1')
        l2_xyz, l2_points = pointnet_sa_module_msg(
            l1_xyz,
            l1_points,
            128, [0.2, 0.4, 0.8], [64, 64, 128],
            [[64, 64, 128], [128, 128, 256], [128, 128, 256]],
            is_training,
            bn_decay,
            scope='layer2')
        l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz,
                                                  l2_points,
                                                  npoint=None,
                                                  radius=None,
                                                  nsample=None,
                                                  mlp=[256, 512, 1024],
                                                  mlp2=None,
                                                  group_all=True,
                                                  is_training=is_training,
                                                  bn_decay=bn_decay,
                                                  scope='layer3')

        # Fully connected layers
        batch_size = points.get_shape()[0].value
        net = tf.reshape(l3_points, [batch_size, -1])
        net = tf_util.fully_connected(net,
                                      512,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc1',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.4,
                              is_training=is_training,
                              scope='dp1')
        net = tf_util.fully_connected(net,
                                      256,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc2',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.4,
                              is_training=is_training,
                              scope='dp2')
        net = tf_util.fully_connected(net,
                                      num_class,
                                      activation_fn=None,
                                      scope='fc3')

        self.logits = tf.expand_dims(net, axis=1)
Exemple #34
0
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx3 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    l0_xyz = point_cloud
    l0_points = None

    # Set abstraction layers
    # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4).
    # So we only use NCHW for layer 1 until this issue can be resolved.
    ''' shape=(batch_size, 1024, 128) '''
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=1024,
                                                       radius=0.1,
                                                       nsample=64,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='sa_layer1')
    ''' shape=(batch_size, 512, 256) '''
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz,
                                                       l1_points,
                                                       npoint=512,
                                                       radius=0.2,
                                                       nsample=64,
                                                       mlp=[128, 128, 256],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='sa_layer2')
    ''' shape=(batch_size, 128, 512) '''
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=128,
                                                       radius=0.4,
                                                       nsample=64,
                                                       mlp=[256, 256, 512],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='sa_layer3')
    ''' shape=(batch_size, 1, 1024) '''
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz,
                                                       l3_points,
                                                       npoint=None,
                                                       radius=None,
                                                       nsample=None,
                                                       mlp=[512, 512, 1024],
                                                       mlp2=None,
                                                       group_all=True,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='sa_layer4')

    # Fully connected layers
    net = tf.reshape(l4_points, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 3, activation_fn=None, scope='fc3')

    return net
Exemple #35
0
def get_model(point_cloud,
              is_training,
              num_class,
              FLAGS,
              GRAPH,
              bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud[:, :, :3]
    # l0_xyz [12 4096 3]
    l0_points = point_cloud[:, :, 3:]
    # l0_points [12 4096 6]
    end_points['l0_xyz'] = l0_xyz

    l1_xyz_sem, l1_points_sem, l1_indices_sem = pointnet_sa_module(
        l0_xyz,
        l0_points,
        npoint=1024,
        radius=0.1,
        nsample=32,
        mlp=[32, 32, 64],
        mlp2=None,
        group_all=False,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer1_sem')
    l1_xyz_ins, l1_points_ins, l1_indices_ins = pointnet_sa_module(
        l0_xyz,
        l0_points,
        npoint=1024,
        radius=0.1,
        nsample=32,
        mlp=[32, 32, 64],
        mlp2=None,
        group_all=False,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer1_ins')
    l1_points_ins, l1_points_sem = cfsm.twin_cfsm(l1_points_ins, l1_points_sem,
                                                  64, 1024, 1, FLAGS, GRAPH)

    # [12 1024 3]  [12 1024 64] [12 1024 32]
    l2_xyz_sem, l2_points_sem, l2_indices_sem = pointnet_sa_module(
        l1_xyz_sem,
        l1_points_sem,
        npoint=256,
        radius=0.2,
        nsample=32,
        mlp=[64, 64, 128],
        mlp2=None,
        group_all=False,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer2_sem')
    l2_xyz_ins, l2_points_ins, l2_indices_ins = pointnet_sa_module(
        l1_xyz_ins,
        l1_points_ins,
        npoint=256,
        radius=0.2,
        nsample=32,
        mlp=[64, 64, 128],
        mlp2=None,
        group_all=False,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer2_ins')
    l2_points_ins, l2_points_sem = cfsm.twin_cfsm(l2_points_ins, l2_points_sem,
                                                  128, 256, 2, FLAGS, GRAPH)

    # [12 256 3]   [12 256 128] [12 256 32]
    l3_xyz_sem, l3_points_sem, l3_indices_sem = pointnet_sa_module(
        l2_xyz_sem,
        l2_points_sem,
        npoint=64,
        radius=0.4,
        nsample=32,
        mlp=[128, 128, 256],
        mlp2=None,
        group_all=False,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer3_sem')
    l3_xyz_ins, l3_points_ins, l3_indices_ins = pointnet_sa_module(
        l2_xyz_ins,
        l2_points_ins,
        npoint=64,
        radius=0.4,
        nsample=32,
        mlp=[128, 128, 256],
        mlp2=None,
        group_all=False,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer3_ins')
    l3_points_ins, l3_points_sem = cfsm.twin_cfsm(l3_points_ins, l3_points_sem,
                                                  256, 64, 3, FLAGS, GRAPH)

    # [12 64 3]    [12 64 256]  [12 64 32]
    l4_xyz_sem, l4_points_sem, l4_indices_sem = pointnet_sa_module(
        l3_xyz_sem,
        l3_points_sem,
        npoint=16,
        radius=0.8,
        nsample=32,
        mlp=[256, 256, 512],
        mlp2=None,
        group_all=False,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer4_sem')
    l4_xyz_ins, l4_points_ins, l4_indices_ins = pointnet_sa_module(
        l3_xyz_ins,
        l3_points_ins,
        npoint=16,
        radius=0.8,
        nsample=32,
        mlp=[256, 256, 512],
        mlp2=None,
        group_all=False,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer4_ins')
    l4_points_ins, l4_points_sem = cfsm.twin_cfsm(l4_points_ins, l4_points_sem,
                                                  512, 16, 4, FLAGS, GRAPH)
    # [12 16 3]    [12 16 512]  [12 16 32]

    # l3_points_sem [12 64 256]
    l3_points_sem_dec = pointnet_fp_module(l3_xyz_sem,
                                           l4_xyz_sem,
                                           l3_points_sem,
                                           l4_points_sem, [256, 256],
                                           is_training,
                                           bn_decay,
                                           scope='sem_fa_layer1')
    l3_points_ins_dec = pointnet_fp_module(l3_xyz_ins,
                                           l4_xyz_ins,
                                           l3_points_ins,
                                           l4_points_ins, [256, 256],
                                           is_training,
                                           bn_decay,
                                           scope='ins_fa_layer1')

    # l2_points_sem_dec [12 256 256]
    l2_points_sem_dec = pointnet_fp_module(l2_xyz_sem,
                                           l3_xyz_sem,
                                           l2_points_sem,
                                           l3_points_sem_dec, [256, 256],
                                           is_training,
                                           bn_decay,
                                           scope='sem_fa_layer2')
    l2_points_ins_dec = pointnet_fp_module(l2_xyz_ins,
                                           l3_xyz_ins,
                                           l2_points_ins,
                                           l3_points_ins_dec, [256, 256],
                                           is_training,
                                           bn_decay,
                                           scope='ins_fa_layer2')

    # l1_points_sem_dec [12 1024 128]
    l1_points_sem_dec = pointnet_fp_module(l1_xyz_sem,
                                           l2_xyz_sem,
                                           l1_points_sem,
                                           l2_points_sem_dec, [256, 128],
                                           is_training,
                                           bn_decay,
                                           scope='sem_fa_layer3')
    l1_points_ins_dec = pointnet_fp_module(l1_xyz_ins,
                                           l2_xyz_ins,
                                           l1_points_ins,
                                           l2_points_ins_dec, [256, 128],
                                           is_training,
                                           bn_decay,
                                           scope='ins_fa_layer3')

    # l0_points_sem_dec [12 4096 128]
    l0_points_sem_dec = pointnet_fp_module(l0_xyz,
                                           l1_xyz_sem,
                                           l0_points,
                                           l1_points_sem_dec, [128, 128, 128],
                                           is_training,
                                           bn_decay,
                                           scope='sem_fa_layer4')
    l0_points_ins_dec = pointnet_fp_module(l0_xyz,
                                           l1_xyz_ins,
                                           l0_points,
                                           l1_points_ins_dec, [128, 128, 128],
                                           is_training,
                                           bn_decay,
                                           scope='ins_fa_layer4')
    net_ins = tf_util.conv1d(l0_points_ins_dec,
                             128,
                             1,
                             padding='VALID',
                             bn=True,
                             is_training=is_training,
                             scope='ins_fc5',
                             bn_decay=bn_decay)
    net_sem = tf_util.conv1d(l0_points_sem_dec,
                             128,
                             1,
                             padding='VALID',
                             bn=True,
                             is_training=is_training,
                             scope='sem_fc5',
                             bn_decay=bn_decay)

    net_ins = tf_util.dropout(net_ins,
                              keep_prob=0.5,
                              is_training=is_training,
                              scope='ins_dp1')
    net_ins = tf_util.conv1d(net_ins,
                             5,
                             1,
                             padding='VALID',
                             activation_fn=None,
                             scope='ins_fc6')
    net_sem = tf_util.dropout(net_sem,
                              keep_prob=0.5,
                              is_training=is_training,
                              scope='sem_dp1')
    net_sem = tf_util.conv1d(net_sem,
                             num_class,
                             1,
                             padding='VALID',
                             activation_fn=None,
                             scope='sem_fc6')

    return net_sem, net_ins  # net_sem [12 4096 13]  net_ins [12 4096 5]
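A minimal usage sketch, not part of the original source: the semantic logits are typically supervised with a per-point cross-entropy, while the instance embedding usually feeds a clustering-style loss that is omitted here; `sem_labels` (a B x N integer label tensor) is a hypothetical name.

import tensorflow as tf

def get_sem_loss(net_sem, sem_labels):
    # net_sem: B x N x num_class semantic logits; sem_labels: B x N int class ids (assumed input)
    per_point = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=sem_labels, logits=net_sem)
    return tf.reduce_mean(per_point)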
Exemple #36
0
    def get_model_w_ae_pp(self, point_cloud, is_training, bn_decay=None):
        """ Classification PointNet, input is BxNx3, output Bx40 """
        pointnet_util = imp.load_source(
            'pointnet_util',
            os.path.join(os.path.dirname(self.models["test"]), '../utils',
                         "pointnet_util.py"))
        tf_util = imp.load_source(
            'tf_util',
            os.path.join(os.path.dirname(self.models["test"]), '../utils',
                         "tf_util.py"))
        from pointnet_util import pointnet_sa_module
        batch_size = self.configuration.batch_size
        num_point = self.configuration.n_input[0]
        end_points = {}
        l0_xyz = point_cloud
        l0_points = None
        end_points['l0_xyz'] = l0_xyz

        # Set abstraction layers
        # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4).
        # So we only use NCHW for layer 1 until this issue can be resolved.
        l1_xyz, l1_points, l1_indices = pointnet_sa_module(
            l0_xyz,
            l0_points,
            npoint=512,
            radius=0.2,
            nsample=32,
            mlp=[64, 64, 128],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='layer1',
            use_nchw=True)
        l2_xyz, l2_points, l2_indices = pointnet_sa_module(
            l1_xyz,
            l1_points,
            npoint=128,
            radius=0.4,
            nsample=64,
            mlp=[128, 128, 256],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='layer2')
        l3_xyz, l3_points, l3_indices = pointnet_sa_module(
            l2_xyz,
            l2_points,
            npoint=None,
            radius=None,
            nsample=None,
            mlp=[256, 512, 1024],
            mlp2=None,
            group_all=True,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='layer3')

        # Fully connected layers
        net = tf.reshape(l3_points, [batch_size, -1])
        end_points['post_max'] = net
        net = tf_util.fully_connected(net,
                                      512,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc1',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.5,
                              is_training=is_training,
                              scope='dp1')
        net = tf_util.fully_connected(net,
                                      256,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc2',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.5,
                              is_training=is_training,
                              scope='dp2')
        end_points['final'] = net
        net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

        return net, end_points
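A hedged graph-building sketch (assumptions: `model` is an instance of the surrounding class, and the placeholder sizes match `model.configuration`; none of these names come from the source):

import tensorflow as tf

def build_cls_graph(model, batch_size=32, num_point=1024):
    # batch_size / num_point are assumed to match model.configuration
    pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
    labels_pl = tf.placeholder(tf.int32, shape=(batch_size,))
    is_training_pl = tf.placeholder(tf.bool, shape=())
    logits, end_points = model.get_model_w_ae_pp(pointclouds_pl, is_training_pl)
    # Standard classification loss on the Bx40 logits
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_pl,
                                                       logits=logits))
    return pointclouds_pl, labels_pl, is_training_pl, logits, loss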
Exemple #37
0
def get_model(point_cloud, num_frame, is_training, bn_decay=None):
    """ Part/motion segmentation PointNet: input is BxNx3; returns the predicted
        point clouds and per-point displacements over num_frame steps, a BxNx2
        moving/static segmentation, the moving-point mask, and a BxNxN
        similarity matrix. """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    ''' shape=(batch_size, num_point, 3) '''
    with tf.variable_scope("generator"):
        l0_xyz = point_cloud
        l0_points = None
        # Set Abstraction layers
        ''' shape=(batch_size, 1024, 128) '''
        l1_xyz, l1_points, l1_indices = pointnet_sa_module(
            l0_xyz,
            l0_points,
            npoint=1024,
            radius=0.1,
            nsample=64,
            mlp=[64, 64, 128],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='sa_layer1')
        ''' shape=(batch_size, 512, 256) '''
        l2_xyz, l2_points, l2_indices = pointnet_sa_module(
            l1_xyz,
            l1_points,
            npoint=512,
            radius=0.2,
            nsample=64,
            mlp=[128, 128, 256],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='sa_layer2')
        ''' shape=(batch_size, 128, 512) '''
        l3_xyz, l3_points, l3_indices = pointnet_sa_module(
            l2_xyz,
            l2_points,
            npoint=128,
            radius=0.4,
            nsample=64,
            mlp=[256, 256, 512],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='sa_layer3')
        ''' shape=(batch_size, 1, 1024) '''
        l4_xyz, l4_points, l4_indices = pointnet_sa_module(
            l3_xyz,
            l3_points,
            npoint=None,
            radius=None,
            nsample=None,
            mlp=[512, 512, 1024],
            mlp2=None,
            group_all=True,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='sa_layer4')

        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=1024,
                                                 forget_bias=1.0,
                                                 name='lstm')
        init_input = tf.reshape(l4_points, [batch_size, -1])

        init_state = lstm_cell.zero_state(batch_size, dtype=tf.float32)
        pc = []
        disp = []
        with tf.variable_scope("RNN"):
            for time_step in range(num_frame):
                if time_step > 0: tf.get_variable_scope().reuse_variables()
                if time_step == 0:
                    (cell_output, state) = lstm_cell(init_input, init_state)
                else:
                    (cell_output, state) = lstm_cell(init_input, state)

                cell_output = tf.reshape(cell_output, [batch_size, 1, 1024])
                # Feature Propagation layers
                ''' shape=(batch_size, 128, 512) '''
                l3_points_ = pointnet_fp_module(l3_xyz,
                                                l4_xyz,
                                                l3_points,
                                                cell_output, [512, 512],
                                                is_training,
                                                bn_decay,
                                                scope='fp_layer1')
                ''' shape=(batch_size, 512, 512) '''
                l2_points_ = pointnet_fp_module(l2_xyz,
                                                l3_xyz,
                                                l2_points,
                                                l3_points_, [512, 512],
                                                is_training,
                                                bn_decay,
                                                scope='fp_layer2')
                ''' shape=(batch_size, 1024, 128) '''
                l1_points_ = pointnet_fp_module(l1_xyz,
                                                l2_xyz,
                                                l1_points,
                                                l2_points_, [256, 128],
                                                is_training,
                                                bn_decay,
                                                scope='fp_layer3')
                ''' shape=(batch_size, num_point, 128) '''
                l0_points_ = pointnet_fp_module(l0_xyz,
                                                l1_xyz,
                                                l0_points,
                                                l1_points_, [128, 128, 128],
                                                is_training,
                                                bn_decay,
                                                scope='fp_layer4')

                # FC layers for feature extraction
                ''' shape = (batch_size, num_point, 128) '''
                fea_fc = tf_util.conv1d(l0_points_,
                                        128,
                                        1,
                                        padding='VALID',
                                        bn=True,
                                        is_training=is_training,
                                        scope='fea_fc1',
                                        bn_decay=bn_decay)
                ''' shape = (batch_size, num_point, 64) '''
                fea_fc = tf_util.conv1d(fea_fc,
                                        64,
                                        1,
                                        padding='VALID',
                                        bn=True,
                                        is_training=is_training,
                                        scope='fea_fc2',
                                        bn_decay=bn_decay)
                fea_fc = tf_util.dropout(fea_fc,
                                         keep_prob=0.5,
                                         is_training=is_training,
                                         scope='fea_dp1')
                ''' shape = (batch_size, num_point, 3) '''
                disp_out = tf_util.conv1d(fea_fc,
                                          3,
                                          1,
                                          padding='VALID',
                                          activation_fn=None,
                                          scope='fea_fc3')
                disp.append(disp_out)
                pc.append(point_cloud + disp_out)
                point_cloud += disp_out
        '''shape=(num_frame, batch_size, num_point, 3)'''
        pc = tf.stack(pc)
        disp = tf.stack(disp)
        '''shape=(batch_size, num_frame, num_point, 3)'''
        pc = tf.transpose(pc, [1, 0, 2, 3])
        disp = tf.transpose(disp, [1, 0, 2, 3])

        # FC layers for segmentation
        seg_fc = tf.reshape(tf.transpose(disp, [0, 2, 1, 3]),
                            [batch_size, num_point, num_frame * 3])
        seg_fc = tf_util.conv1d(seg_fc,
                                8,
                                1,
                                padding='VALID',
                                bn=True,
                                is_training=is_training,
                                scope='seg_fc1',
                                bn_decay=bn_decay)
        seg_dp = tf_util.dropout(seg_fc,
                                 keep_prob=0.5,
                                 is_training=is_training,
                                 scope='seg_dp1')
        seg_fc = tf_util.conv1d(seg_dp,
                                2,
                                1,
                                padding='VALID',
                                bn=True,
                                is_training=is_training,
                                scope='seg_fc2',
                                bn_decay=bn_decay)
        mov_seg = tf.reshape(seg_fc, [batch_size, num_point, 2])

    with tf.variable_scope("partseg"):
        seg_l0_points = tf.reshape(disp,
                                   (batch_size, num_point, 3 * num_frame))

        mov_mask = tf.cast(tf.greater(tf.argmax(mov_seg, 2), 0), tf.int32)
        mask_tiled = tf.cast(tf.expand_dims(mov_mask, -1), tf.float32)
        mask_tiled = tf.tile(mask_tiled, [1, 1, 3 * num_frame])
        seg_l0_points = seg_l0_points * mask_tiled
        ''' shape=(batch_size, 1024, 64) '''
        seg_l1_xyz, seg_l1_points, seg_l1_indices = pointnet_sa_module(
            l0_xyz,
            seg_l0_points,
            npoint=1024,
            radius=0.1,
            nsample=32,
            mlp=[32, 32, 64],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='sa_layer5')
        ''' shape=(batch_size, 512, 128) '''
        seg_l2_xyz, seg_l2_points, seg_l2_indices = pointnet_sa_module(
            seg_l1_xyz,
            seg_l1_points,
            npoint=512,
            radius=0.2,
            nsample=32,
            mlp=[64, 64, 128],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='sa_layer6')
        ''' shape=(batch_size, 128, 256) '''
        seg_l3_xyz, seg_l3_points, seg_l3_indices = pointnet_sa_module(
            seg_l2_xyz,
            seg_l2_points,
            npoint=128,
            radius=0.4,
            nsample=32,
            mlp=[128, 128, 256],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='sa_layer7')
        ''' shape=(batch_size, 1, 512) '''
        seg_l4_xyz, seg_l4_points, seg_l4_indices = pointnet_sa_module(
            seg_l3_xyz,
            seg_l3_points,
            npoint=32,
            radius=0.8,
            nsample=32,
            mlp=[256, 256, 512],
            mlp2=None,
            group_all=True,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='sa_layer8')
        # Feature Propagation layers
        ''' shape=(batch_size, 128, 256) '''
        seg_l3_points_ = pointnet_fp_module(seg_l3_xyz,
                                            seg_l4_xyz,
                                            seg_l3_points,
                                            seg_l4_points, [256, 256],
                                            is_training,
                                            bn_decay,
                                            scope='fp_layer5')
        ''' shape=(batch_size, 512, 256) '''
        seg_l2_points_ = pointnet_fp_module(seg_l2_xyz,
                                            seg_l3_xyz,
                                            seg_l2_points,
                                            seg_l3_points_, [256, 256],
                                            is_training,
                                            bn_decay,
                                            scope='fp_layer6')
        ''' shape=(batch_size, 1024, 128) '''
        seg_l1_points_ = pointnet_fp_module(seg_l1_xyz,
                                            seg_l2_xyz,
                                            seg_l1_points,
                                            seg_l2_points_, [256, 128],
                                            is_training,
                                            bn_decay,
                                            scope='fp_layer7')
        ''' shape=(batch_size, num_point, 128) '''
        seg_l0_points_ = pointnet_fp_module(l0_xyz,
                                            seg_l1_xyz,
                                            seg_l0_points,
                                            seg_l1_points_, [128, 128, 128],
                                            is_training,
                                            bn_decay,
                                            scope='fp_layer8')

        sim_features = tf_util.conv1d(seg_l0_points_,
                                      128,
                                      1,
                                      padding='VALID',
                                      bn=True,
                                      is_training=is_training,
                                      scope='seg_fc3',
                                      bn_decay=bn_decay)
        sim_features = tf_util.dropout(sim_features,
                                       keep_prob=0.5,
                                       is_training=is_training,
                                       scope='seg_dp2')
        sim_features = tf_util.conv1d(sim_features,
                                      128,
                                      1,
                                      padding='VALID',
                                      activation_fn=None,
                                      scope='seg_fc4')

        r = tf.reduce_sum(sim_features * sim_features, 2)
        r = tf.reshape(r, [batch_size, -1, 1])
        print(r.get_shape(), sim_features.get_shape())
        # (x-y)^2 = x^2 - 2*x*y + y^2
        D = r - 2 * tf.matmul(
            sim_features, tf.transpose(
                sim_features, perm=[0, 2, 1])) + tf.transpose(r,
                                                              perm=[0, 2, 1])
        simmat_logits = tf.maximum(10 * D, 0.)

    return pc, disp, mov_seg, mov_mask, simmat_logits
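The similarity head above relies on the expansion ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2 to compute all pairwise squared distances between the 128-d point features with a single matmul. A small NumPy check of that identity (illustrative only, not from the source):

import numpy as np

X = np.random.randn(6, 4)                    # stand-in for one batch item of sim_features
r = np.sum(X * X, axis=1, keepdims=True)     # squared norms, shape (N, 1)
D = r - 2.0 * X @ X.T + r.T                  # pairwise squared distances, (N, N)
brute = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
assert np.allclose(D, brute)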