Example #1
    def shape_to_patch(self, label_input_ratio):
        """
        input: [batchsize, P, 3]
        """
        input_pc, label_pc = label_input_ratio['input'], label_input_ratio[
            'label']
        ratio = label_input_ratio["ratio"]
        # B, 1, 1
        rnd_pts = tf.random_uniform((self.batch_size, 1, 1),
                                    dtype=tf.int32,
                                    maxval=self.saved_patch_size[0])
        batch_indices = tf.reshape(tf.range(self.batch_size), (-1, 1, 1))
        indices = tf.concat([batch_indices, rnd_pts], axis=-1)
        rnd_pts = tf.gather_nd(label_pc, indices)  # [batch_size, 1, 3]
        _, knn_index = knn_point(self.num_in_point * ratio, label_pc,
                                 rnd_pts)  # [batch_size, 1, num_gt_point, 2]
        label_patches = tf.gather_nd(
            label_pc, knn_index)  # [batch_size, 1, num_gt_point, 3]
        _, knn_index = knn_point(self.num_in_point, input_pc, rnd_pts)
        input_patches = tf.gather_nd(
            input_pc, knn_index)  # [batch_size, 1, num_gt_point/up_ratio, 3]
        label_patches = tf.squeeze(label_patches,
                                   axis=1)  # [batch_size, num_gt_point, 3]
        input_patches = tf.squeeze(input_patches, axis=1)

        label_patches, centroid, furthest_distance = normalize_point_cloud(
            label_patches)
        input_patches = (input_patches - centroid) / furthest_distance
        radius = tf.constant(np.ones((self.batch_size)), dtype=tf.float32)
        return {
            "label": label_patches,
            "input": input_patches,
            "radius": radius,
            "ratio": ratio
        }
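The seed selection above pairs each batch index with one random point index and gathers with tf.gather_nd. A minimal NumPy sketch of the same indexing trick (toy shapes, no dependence on the custom TF ops):

import numpy as np

batch_size, num_point = 4, 1000
label_pc = np.random.rand(batch_size, num_point, 3).astype(np.float32)

# one random point index per cloud, shaped [B, 1, 1] as in the TF code above
rnd_pts = np.random.randint(0, num_point, size=(batch_size, 1, 1))
batch_indices = np.arange(batch_size).reshape(-1, 1, 1)

# (batch, point) index pairs; gathering with these picks one seed point per cloud
indices = np.concatenate([batch_indices, rnd_pts], axis=-1)   # [B, 1, 2]
seeds = label_pc[indices[..., 0], indices[..., 1]]            # [B, 1, 3]
assert seeds.shape == (batch_size, 1, 3)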
def get_edge_feature(point_cloud, k=20, idx=None):
    """Construct edge feature for each point
    Args:
        point_cloud: (batch_size, num_points, 1, num_dims)
        nn_idx: (batch_size, num_points, k, 2)
        k: int
    Returns:
        edge features: (batch_size, num_points, k, num_dims)
    """
    if idx is None:
        _, idx = knn_point(k + 1,
                           point_cloud,
                           point_cloud,
                           unique=True,
                           sort=True)
        idx = idx[:, :, 1:, :]

    # [N, P, K, Dim]
    point_cloud_neighbors = tf.gather_nd(point_cloud, idx)
    point_cloud_central = tf.expand_dims(point_cloud, axis=-2)

    point_cloud_central = tf.tile(point_cloud_central, [1, 1, k, 1])

    edge_feature = tf.concat(
        [point_cloud_central, point_cloud_neighbors - point_cloud_central],
        axis=-1)
    return edge_feature, idx
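A hedged usage sketch for get_edge_feature, assuming a TF1 graph and that the compiled knn_point op used above is importable; the tf.layers convolution is only illustrative of how the edge feature is typically consumed (a DGCNN-style edge conv), not code from the source repository:

import tensorflow as tf

point_cloud = tf.placeholder(tf.float32, [8, 1024, 3])        # [B, N, 3]
edge_feature, idx = get_edge_feature(point_cloud, k=16)       # [8, 1024, 16, 6]

# shared MLP over each edge, then max over the k neighbors
net = tf.layers.conv2d(edge_feature, filters=64, kernel_size=1,
                       activation=tf.nn.relu)                 # [8, 1024, 16, 64]
net = tf.reduce_max(net, axis=2)                              # [8, 1024, 64]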
Example #3
def get_smooth_and_uniform_loss(pred,
                                normal,
                                nsample=20,
                                radius=0.07,
                                knn=False):
    # pred: (batch_size, npoint,3)
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unique_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    ##get the uniform loss
    dists = tf.reduce_sum(grouped_pred**2, axis=-1)
    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one
    val = tf.maximum(0.0, 0.001 + val)
    uniform_loss = tf.reduce_mean(val)

    # idx = idx[:, :, 1:]  # (batch_size, npoint, 4)
    # batch_size = pred.get_shape()[0].value
    # nPoints = pred.get_shape()[1].value
    # grouped_pred_reshape = tf.reshape(grouped_pred, (-1, 3))
    # indics = tf.reshape(tf.range(batch_size*nPoints), (batch_size*nPoints, 1)) * nsample + tf.reshape(idx,[batch_size*nPoints,-1])
    # grouped_pred = tf.gather(grouped_pred_reshape, indics)
    # grouped_pred = tf.reshape(grouped_pred,(batch_size,nPoints,4,-1))
    # grouped_pred = tf.nn.l2_normalize(grouped_pred, dim=-1)
    # inner_product = tf.abs(tf.reduce_sum(grouped_pred * tf.expand_dims(normal, axis=2), axis=-1))  # (batch_size, npoint,nsample)
    # smooth_loss = tf.reduce_mean(inner_product)
    return uniform_loss, 0
Example #4
def get_perulsion_loss(pred, nsample=15, radius=0.07, knn=False, numpoint=4096, use_l1=False):
    # pred: (batch_size, npoint,3)
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unique_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    ##get the uniform loss
    dists = tf.reduce_sum(grouped_pred ** 2, axis=-1)
    if use_l1:
        dists = tf.sqrt(dists+1e-12)
    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one

    if use_l1:
        h = np.sqrt(0.001)*2
    else:
        h = 0.001
    print "h is ",h
    val = tf.maximum(0.0, h + val) # dd/np.sqrt(n)
    perulsion_loss = tf.reduce_mean(val)
    return perulsion_loss
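The term above penalizes points whose four nearest neighbors (the first of the five returned is the point itself) lie closer than h. A small self-contained NumPy sketch of the same hinge on a toy point set, with h chosen arbitrarily for illustration:

import numpy as np

pts = np.random.rand(1, 64, 3)                                          # [B, N, 3] toy cloud
d2 = np.sum((pts[:, :, None, :] - pts[:, None, :, :]) ** 2, axis=-1)    # [B, N, N] squared dists

nearest = np.sort(d2, axis=-1)[:, :, 1:5]         # 4 smallest squared distances, self excluded
h = 0.001
penalty = np.mean(np.maximum(0.0, h - nearest))   # only neighbors closer than sqrt(h) contribute
print("repulsion-style penalty:", penalty)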
Example #5
def get_perulsion_loss1_orthdistance(pred, normal, nsample=15, radius=0.07, knn=False, numpoint=4096,use_l1=False):
    # pred: (batch_size, npoint,3)
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unique_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    offset = grouped_pred-tf.expand_dims(pred, 2)
    normal = tf.expand_dims(normal,axis=2)
    dists = offset -tf.reduce_sum(normal*offset,axis=-1,keep_dims=True)*normal

    dists = tf.reduce_sum(dists ** 2, axis=-1)
    if use_l1:
        dists = tf.sqrt(dists+1e-12)
    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one

    h = (2.0 / np.sqrt(numpoint)) ** 2  # note: overridden just below
    if use_l1:
        h = np.sqrt(0.001) * 2
    else:
        h = 0.001
    print("h is", h)

    val = tf.maximum(0.0, h + val)  # dd/np.sqrt(n)
    uniform_loss = tf.reduce_mean(val)

    return 20*uniform_loss
Example #6
def get_uniform_loss(pcd,
                     percentages=[0.004, 0.006, 0.008, 0.010, 0.012],
                     radius=1.0):
    B, N, C = pcd.get_shape().as_list()
    npoint = int(N * 0.05)
    loss = []
    for p in percentages:
        nsample = int(N * p)
        r = math.sqrt(p * radius)
        disk_area = math.pi * (radius**2) * p / nsample
        #print(npoint,nsample)
        new_xyz = gather_point(pcd, farthest_point_sample(
            npoint, pcd))  # (batch_size, npoint, 3)
        idx, pts_cnt = query_ball_point(
            r, nsample, pcd, new_xyz)  #(batch_size, npoint, nsample)

        #expect_len =  tf.sqrt(2*disk_area/1.732)#using hexagon
        expect_len = tf.sqrt(disk_area)  # using square

        grouped_pcd = group_point(pcd, idx)
        grouped_pcd = tf.concat(tf.unstack(grouped_pcd, axis=1), axis=0)

        var, _ = knn_point(2, grouped_pcd, grouped_pcd)
        uniform_dis = -var[:, :, 1:]
        uniform_dis = tf.sqrt(tf.abs(uniform_dis + 1e-8))
        uniform_dis = tf.reduce_mean(uniform_dis, axis=[-1])
        uniform_dis = tf.square(uniform_dis - expect_len) / (expect_len + 1e-8)
        uniform_dis = tf.reshape(uniform_dis, [-1])

        mean, variance = tf.nn.moments(uniform_dis, axes=0)
        mean = mean * math.pow(p * 100, 2)
        loss.append(mean)
    return tf.add_n(loss) / len(percentages)
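The expected spacing inside the loop comes from assuming the nsample points in each queried ball share the disk area evenly. A quick check of that arithmetic for one of the percentages (numbers are illustrative only):

import math

N, radius, p = 8192, 1.0, 0.004
nsample = int(N * p)                            # points expected inside each ball
r = math.sqrt(p * radius)                       # query-ball radius used above
disk_area = math.pi * (radius ** 2) * p / nsample
expect_len = math.sqrt(disk_area)               # per-point square cell side, the target spacing
print(nsample, round(r, 4), round(expect_len, 4))   # 32 0.0632 0.0198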
Example #7
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))  # (batch_size, npoint, 3)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
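A hedged usage sketch for this sample_and_group variant, assuming the compiled FPS / ball-query / grouping ops are available in the graph; sizes are illustrative:

import tensorflow as tf

xyz = tf.placeholder(tf.float32, [16, 2048, 3])      # input cloud
feats = tf.placeholder(tf.float32, [16, 2048, 32])   # per-point features

# 512 FPS centroids, 32 neighbors within radius 0.2, local XYZ concatenated to features
new_xyz, new_points, idx, grouped_xyz = sample_and_group(
    npoint=512, radius=0.2, nsample=32, xyz=xyz, points=feats,
    knn=False, use_xyz=True)
# new_points: [16, 512, 32, 3 + 32] -- typically fed to a shared MLP followed by max pooling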
Example #8
def get_pca_loss(pred_edge):
    # NOTE: unfinished stub in the source -- only the neighborhood grouping is
    # set up here; no PCA-based loss is actually computed or returned.
    nsample = 10
    _, idx = knn_point(nsample, pred_edge, pred_edge)
    grouped_pred = group_point(pred_edge, idx)  # (batch_size, npoint, nsample, 3)
    W = tf.get_variable('pca', shape=(3, 1))

    return
Example #9
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    '''
    input data: xyz, of shape N x (d+C)
    a set of centroids: npoint (N') points of dimension d
    neighbors: nsample (K) points of dimension d+C
    farthest_point_sample outputs the indices of the npoint sampled points;
    gather_point then gathers those npoint points from the input data by index.
    '''
    # aaa = farthest_point_sample(npoint, xyz)
    new_xyz = gather_point(xyz, farthest_point_sample(
        npoint, xyz))  # (batch_size, npoint, 3)
    # print('xys:', new_xyz.get_shape())
    # print('new_xyz in s g:', new_xyz.get_shape(), 'npoint:', npoint)

    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        # K' is flexible but at most nsample; not explicitly specified in the paper
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                           [1, 1, nsample, 1])  # translation normalization
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #10
    def shape_to_patch(self, label_input_radius_ratio):
        # random sample points as seed
        # knn on seeds
        # normalize patch and update radius
        input_pc, label_pc, radius = label_input_radius_ratio[
            'input'], label_input_radius_ratio[
                'label'], label_input_radius_ratio['radius']
        ratio = label_input_radius_ratio["ratio"]
        if self.jitter:
            input_pc, centroid, furthest_distance = normalize_point_cloud(
                input_pc)
            input_pc = jitter_perturbation_point_cloud(input_pc,
                                                       sigma=self.jitter_sigma,
                                                       clip=self.jitter_max)
        # B, 1, 1
        rnd_pts = tf.random_uniform((self.batch_size, 1, 1),
                                    dtype=tf.int32,
                                    maxval=self.input_placeholder.shape[1])
        batch_indices = tf.reshape(tf.range(self.batch_size), (-1, 1, 1))
        indices = tf.concat([batch_indices, rnd_pts], axis=-1)
        rnd_pts = tf.gather_nd(label_pc, indices)  # [batch_size, 1, 3]
        _, knn_index = knn_point(self.num_in_point * ratio, label_pc,
                                 rnd_pts)  # [batch_size, 1, num_gt_point, 2]
        label_patches = tf.gather_nd(
            label_pc, knn_index)  # [batch_size, 1, num_gt_point, 3]
        _, knn_index = knn_point(self.num_in_point, input_pc, rnd_pts)
        input_patches = tf.gather_nd(
            input_pc, knn_index)  # [batch_size, 1, num_gt_point/up_ratio, 3]
        label_patches = tf.squeeze(label_patches,
                                   axis=1)  # [batch_size, num_gt_point, 3]
        input_patches = tf.squeeze(input_patches, axis=1)

        label_patches, centroid, furthest_distance = normalize_point_cloud(
            label_patches)
        input_patches = (input_patches - centroid) / furthest_distance
        radius = tf.constant(np.ones((self.batch_size)), dtype=tf.float32)
        return {
            "label": label_patches,
            "input": input_patches,
            "radius": radius,
            "ratio": ratio
        }
Example #11
    def shape_to_patch(self, label_input_radius):
        # random sample points as seed
        # knn on seeds
        # normalize patch and update radius
        input_pc, label_pc, radius = label_input_radius[
            'input'], label_input_radius['label'], label_input_radius['radius']
        label_pc = tf.expand_dims(label_pc, 0)
        input_pc = tf.expand_dims(input_pc, 0)
        if self.jitter:
            input_pc, centroid, furthest_distance = normalize_point_cloud(
                input_pc)
            input_pc = self.jitter_perturbation_point_cloud(
                input_pc, sigma=self.jitter_sigma, clip=self.jitter_max)
        rnd_pts = tf.random_uniform((1, self.batch_size),
                                    dtype=tf.int32,
                                    maxval=tf.shape(input_pc)[1])
        rnd_pts = tf.batch_gather(label_pc, rnd_pts)  # [1, batch_size, 3]
        _, knn_index = knn_point(self.num_gt_point, label_pc,
                                 rnd_pts)  # [1, batch_size, num_gt_point, 2]
        label_patches = tf.gather_nd(
            label_pc, knn_index)  # [1, batch_size, num_gt_point, 3]
        _, knn_index = knn_point(self.num_in_point, input_pc, rnd_pts)
        input_patches = tf.gather_nd(
            input_pc, knn_index)  # [1, batch_size, num_gt_point/up_ratio, 3]

        label_patches = tf.squeeze(label_patches,
                                   axis=0)  # [batch_size, num_gt_point, 3]
        input_patches = tf.squeeze(input_patches, axis=0)

        label_patches, centroid, furthest_distance = self.normalize_point_cloud(
            label_patches)
        input_patches = (input_patches - centroid) / furthest_distance
        radius = tf.constant(np.ones((self.batch_size)), dtype=tf.float32)

        return {
            "label": label_patches,
            "input": input_patches,
            "radius": radius
        }
Example #12
File: model_utils.py | Project: zeta1999/3PU
def get_repulsion_loss(pred,
                       nsample=20,
                       radius=0.07,
                       knn=False,
                       use_l1=False,
                       h=0.001):
    # # pred: (batch_size, npoint,3)
    # idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    # tf.summary.histogram('smooth/unque_index', pts_cnt)

    # grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    # grouped_pred -= tf.expand_dims(pred, 2)

    # # get the uniform loss
    # h = 0.03
    # dist_square = tf.reduce_sum(grouped_pred ** 2, axis=-1)
    # dist_square, idx = tf.nn.top_k(-dist_square, 5)
    # dist_square = -dist_square[:, :, 1:]  # remove the first one
    # dist_square = tf.maximum(1e-12,dist_square)
    # dist = tf.sqrt(dist_square)
    # weight = tf.exp(-dist_square/h**2)
    # uniform_loss = tf.reduce_mean(radius-dist*weight)
    # return uniform_loss
    # pred: (batch_size, npoint,3)
    if knn:
        _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unique_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    # get the uniform loss
    if use_l1:
        dists = tf.reduce_sum(tf.abs(grouped_pred), axis=-1)
    else:
        dists = tf.reduce_sum(grouped_pred**2, axis=-1)

    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one

    if use_l1:
        h = np.sqrt(h) * 2
    print(("h is ", h))

    val = tf.maximum(0.0, h + val)  # dd/np.sqrt(n)
    repulsion_loss = tf.reduce_mean(val)
    return repulsion_loss
Example #13
def query_and_group_points(xyz,
                           points,
                           new_xyz,
                           nsample,
                           radius,
                           knn=False,
                           use_xyz=True,
                           normalize_radius=True,
                           orientations=None):

    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
        pts_cnt = nsample  # Hack. By right should make sure number of input points < nsample
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)

    tf.summary.histogram('pts_cnt', pts_cnt)

    # Group XYZ coordinates
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz = grouped_xyz - tf.tile(tf.expand_dims(
        new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if normalize_radius:
        grouped_xyz /= radius  # Scale normalization
    # 2D-rotate via orientations if necessary
    if orientations is not None:
        cosval = tf.expand_dims(tf.cos(orientations), axis=2)
        sinval = tf.expand_dims(tf.sin(orientations), axis=2)
        grouped_xyz = tf.stack([
            cosval * grouped_xyz[:, :, :, 0] +
            sinval * grouped_xyz[:, :, :, 1],
            -sinval * grouped_xyz[:, :, :, 0] +
            cosval * grouped_xyz[:, :, :, 1], grouped_xyz[:, :, :, 2]
        ],
                               axis=3)

    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:

            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_points, idx
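The orientation branch above rotates each grouped offset about the z-axis. A tiny NumPy check (toy numbers, independent of the TF ops) that the stacked expression is the same as multiplying by a 2D rotation matrix:

import numpy as np

theta = 0.7                                   # hypothetical per-point orientation
p = np.array([0.3, -0.2, 0.5])                # one grouped offset
c, s = np.cos(theta), np.sin(theta)

stacked = np.array([c * p[0] + s * p[1],      # mirrors the tf.stack expression above
                    -s * p[0] + c * p[1],
                    p[2]])
R = np.array([[c, s, 0.0],
              [-s, c, 0.0],
              [0.0, 0.0, 1.0]])
assert np.allclose(stacked, R @ p)            # same rotation, written two ways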
Example #14
def sample_and_group(npoint, radius, nsample, xyz, points, tnet_spec=None, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        if np.isscalar(radius):
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            tf.summary.histogram('pts_cnt', pts_cnt)
        else:
            idx_list = []
            for radius_one, xyz_one, new_xyz_one in zip(tf.unstack(radius,axis=0), tf.unstack(xyz, axis=0),tf.unstack(new_xyz, axis=0)):
                idx_one, pts_cnt = query_ball_point(radius_one, nsample, tf.expand_dims(xyz_one, axis=0), tf.expand_dims(new_xyz_one, axis=0))
                idx_list.append(idx_one)
            idx = tf.stack(idx_list, axis=0)
            idx = tf.squeeze(idx, axis=1)

    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            # new_points = tf.concat([grouped_xyz, tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]),grouped_points], axis=-1) # (batch_size, npoint, nample, 3+channel)
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        # new_points =  tf.concat([grouped_xyz, tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])], axis=-1)
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #15
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     normals,
                     tnet_spec=None,
                     knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        normals: (batch_size, ndataset, 3) TF tensor
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_normals: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    indices = farthest_point_sample(npoint, xyz)
    new_xyz = gather_point(xyz, indices)  # (batch_size, npoint, 3)
    new_normals = gather_point(normals, indices)  # (batch_size, npoint, 3)
    _, idx = knn_point(nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                           [1, 1, nsample, 1])  # translation normalization
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, new_normals, idx, grouped_xyz
Example #16
def group(xyz, points, k, dilation=1, use_xyz=False):
    _, idx = knn_point(k * dilation + 1, xyz, xyz)
    idx = idx[:, :, 1::dilation]

    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, k, 3)
    grouped_xyz -= tf.expand_dims(xyz, 2)  # translation normalization
    if points is not None:
        grouped_points = group_point(points,
                                     idx)  # (batch_size, npoint, k, channel)
        if use_xyz:
            grouped_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, k, 3+channel)
    else:
        grouped_points = grouped_xyz

    return grouped_xyz, grouped_points, idx
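The dilation trick above queries k * dilation + 1 neighbors and then strides through them, dropping the self index at position 0 and keeping every dilation-th neighbor. A tiny illustration of the slicing (indices are made up):

import numpy as np

k, dilation = 4, 2
idx = np.arange(k * dilation + 1)    # stand-in for one point's sorted neighbor indices
print(idx[1::dilation])              # [1 3 5 7]: k neighbors, self excluded, stride = dilation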
Example #17
def get_uniform_loss2(pred, nsample=20, radius=0.07, knn=False):
    # pred: (batch_size, npoint,3)
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unique_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    ##get the uniform loss
    dists = tf.reduce_sum(grouped_pred**2, axis=-1)
    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one
    uniform_loss = tf.reduce_mean(tf.exp(val / 0.03**2))
    return 0.2 * uniform_loss
Example #18
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     tnet_spec=None,
                     knn=False,
                     use_xyz=True,
                     keypoints=None,
                     orientations=None,
                     normalize_radius=False):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        keypoints: None or tensor with shape [None, None, 3], containing the xyz of keypoints.
                   If provided, npoint will be ignored, and iterative furthest sampling will be skipped
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor, i.e. cluster center (dim=3)
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor (dim=3+c, first 3 dimensions are normalized XYZ)
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions. This is usually the first 3 dimensions of new_points
    '''

    end_points = {}

    if keypoints is not None:
        new_xyz = keypoints
    else:
        new_xyz = gather_point(xyz, farthest_point_sample(
            npoint, xyz))  # (batch_size, npoint, 3)

    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
        pts_cnt = nsample  # Hack. By right should make sure number of input points < nsample
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)

    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz = grouped_xyz - tf.tile(tf.expand_dims(
        new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if normalize_radius:
        grouped_xyz /= radius

    end_points['grouped_xyz_before'] = grouped_xyz

    # 2D-rotate via orientations if necessary
    if orientations is not None:
        cosval = tf.cos(orientations)
        sinval = tf.sin(orientations)
        one = tf.ones_like(cosval)
        zero = tf.zeros_like(cosval)
        R = tf.stack([(cosval, sinval, zero), (-sinval, cosval, zero),
                      (zero, zero, one)],
                     axis=0)
        R = tf.transpose(R, perm=[2, 3, 0, 1])
        grouped_xyz = tf.matmul(grouped_xyz, R)
        end_points['rotation'] = R

    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    end_points['grouped_xyz'] = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz, end_points
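A quick NumPy sanity check of the rotation matrix assembled above, with a scalar orientation instead of a tensor purely for illustration: it is orthonormal with determinant 1, i.e. a proper rotation about the z-axis.

import numpy as np

theta = 0.4
c, s, one, zero = np.cos(theta), np.sin(theta), 1.0, 0.0
R = np.array([[c, s, zero],
              [-s, c, zero],
              [zero, zero, one]])

assert np.allclose(R @ R.T, np.eye(3))
assert np.isclose(np.linalg.det(R), 1.0)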
Example #19
def get_smooth_loss(pred,
                    normal,
                    nsample=20,
                    radius=0.05,
                    knn=False,
                    selected=True,
                    re_weight=False,
                    grouping=None):
    # pred: (batch_size, npoint,3)
    # normal : (batch_size,npoint,3)
    if selected:
        radius = 1.0 * radius
        nsample = int(1.0 * nsample)
    # first get some neighborhood points
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unique_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)
    dists = tf.reduce_sum(grouped_pred**2, axis=-1)
    val, idx = tf.nn.top_k(-dists, 5)
    idx = idx[:, :, 1:]
    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    grouped_pred_normalized = tf.nn.l2_normalize(grouped_pred, dim=-1)
    inner_product = tf.abs(
        tf.reduce_sum(grouped_pred_normalized * tf.expand_dims(normal, axis=2),
                      axis=-1))  # (batch_size, npoint, nsample)
    if re_weight:
        alpha = 5
        inner_product = (tf.exp(alpha * inner_product) - 1) / (
            np.exp(alpha) - 1)  # (batch_size, npoint,nsample)
    if grouping == 'exp_weighted':
        epsilon = 1e-12
        dists = tf.norm(grouped_pred + epsilon,
                        axis=-1)  # (batch_size, npoint, nsample)
        dists = tf.maximum(dists, 1e-10)  # (batch_size, npoint,nsample)
        exp_dists = tf.exp(-dists * 20)  # (batch_size, npoint,nsample)
        weights = exp_dists / tf.reduce_sum(
            exp_dists, axis=2, keep_dims=True)  # (batch_size, npoint, nsample)
        tf.summary.histogram('smooth/weighted', weights)
        inner_product = weights * inner_product

    if selected:
        grouped_normal = group_point(normal, idx)
        mask = tf.to_float(
            tf.greater(
                tf.reduce_sum(grouped_normal * tf.expand_dims(normal, axis=2),
                              axis=-1), 0.0))
        tf.summary.histogram('smooth/mask1', tf.count_nonzero(mask, axis=-1))
        smooth_loss = tf.reduce_sum(mask * inner_product) / tf.reduce_sum(mask)
    else:
        smooth_loss = tf.reduce_mean(inner_product)

    return smooth_loss
Example #20
    pprint(train_relpath)
    print("train:", len(dirs), len(train_relpath))

    train_relpath_shards = [
        train_relpath[start:start + shard]
        for start in range(0, len(train_relpath), shard)
    ]

    # placeholder for point cloud
    input_placeholder = tf.placeholder(tf.float32, [1, None, 3])
    num_in_point_placeholder = tf.placeholder(tf.int32, [])
    seed_points_placeholder = tf.placeholder(tf.float32,
                                             [1, num_patch_per_shape, 3])

    _, knn_index = knn_point(num_in_point_placeholder, input_placeholder,
                             seed_points_placeholder)
    # [batch_size, 1, num_gt_point/up_ratio, 3]
    input_patches = tf.gather_nd(input_placeholder, knn_index)
    input_patches = tf.squeeze(input_patches, axis=0)

    with tf.Session() as sess:
        for i, train_relpath in enumerate(train_relpath_shards):
            print("shard {}".format(i))
            print(train_relpath)
            with tf.python_io.TFRecordWriter(
                    os.path.join(
                        out_dir, "{}_p{}_shard{}.tfrecord".format(
                            "_".join(datasets), num_point, i))) as writer:
                for p in train_relpath:
                    seed_points = None
                    centroid = furthest_distance = None
Example #21
File: model_utils.py | Project: zeta1999/3PU
def extract_patch_for_next_level(batch_xyz,
                                 k,
                                 batch_features=None,
                                 gt_xyz=None,
                                 gt_k=None,
                                 is_training=True):
    """
    :param batch_xyz [B, P, 3]
    """
    batch_size, num_point, _ = batch_xyz.shape.as_list()
    with tf.name_scope("extract_input"):
        if is_training:
            # B, 1, 3
            idx = tf.random_uniform([batch_size, 1],
                                    minval=0,
                                    maxval=num_point,
                                    dtype=tf.int32)
            # idx = tf.constant(250, shape=[batch_size, 1], dtype=tf.int32)
            batch_seed_point = gather_point(batch_xyz, idx)
            patch_num = 1
        else:
            assert (batch_size == 1)
            # remove residual, (B P 1) and (B, P, 1, 2)
            closest_d, _ = knn_point(2, batch_xyz, batch_xyz, unique=False)
            closest_d = closest_d[:, :, 1:]
            # (B, P)
            mask = tf.squeeze(
                closest_d < 5 *
                (tf.reduce_mean(closest_d, axis=1, keepdims=True)),
                axis=-1)
            # filter (B, P', 3)
            batch_xyz = tf.expand_dims(tf.boolean_mask(batch_xyz, mask),
                                       axis=0)
            # batch_xyz = tf.Print(batch_xyz, [tf.shape(batch_xyz)])
            # B, M, 3
            # batch_seed_point = batch_xyz[:, -1:, :]
            # patch_num = 1
            patch_num = int(num_point / k * 5)
            # idx = tf.random_uniform([batch_size, patch_num], minval=0, maxval=num_point, dtype=tf.int32)
            idx = tf.squeeze(farthest_point_sample(patch_num, batch_xyz),
                             axis=0)
            # idx = tf.random_uniform([patch_num], minval=0, maxval=tf.shape(batch_xyz)[1], dtype=tf.int32)
            # B, P, 3 -> B, k, 3 (idx B, k, 1)
            # idx = tf.Print(idx, [idx], message="idx")
            batch_seed_point = tf.gather(batch_xyz, idx, axis=1)
            k = tf.minimum(k, tf.shape(batch_xyz)[1])
            # batch_seed_point = gather_point(batch_xyz, idx)
        # B, M, k, 2
        _, new_patch_idx = knn_point(k,
                                     batch_xyz,
                                     batch_seed_point,
                                     unique=False)
        # B, M, k, 3
        batch_xyz = tf.gather_nd(batch_xyz, new_patch_idx)
        # MB, k, 3
        batch_xyz = tf.concat(tf.unstack(batch_xyz, axis=1), axis=0)
    if batch_features is not None:
        with tf.name_scope("extract_feature"):
            batch_features = tf.gather_nd(batch_features, new_patch_idx)
            batch_features = tf.concat(tf.unstack(batch_features, axis=1),
                                       axis=0)
    if is_training and (gt_xyz is not None and gt_k is not None):
        with tf.name_scope("extract_gt"):
            _, new_patch_idx = knn_point(gt_k,
                                         gt_xyz,
                                         batch_seed_point,
                                         unique=False)
            gt_xyz = tf.gather_nd(gt_xyz, new_patch_idx)
            gt_xyz = tf.concat(tf.unstack(gt_xyz, axis=1), axis=0)
    else:
        gt_xyz = None

    return batch_xyz, batch_features, gt_xyz
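A hedged sketch of how the training branch of extract_patch_for_next_level might be driven; the placeholder shapes and patch sizes are illustrative and the surrounding 3PU training code is not reproduced here:

import tensorflow as tf

batch_xyz = tf.placeholder(tf.float32, [8, 1024, 3])   # sparse input clouds
gt_xyz = tf.placeholder(tf.float32, [8, 4096, 3])      # dense ground-truth clouds

# training path: one random seed per cloud, a k-point input patch and a gt_k-point label patch
patch_xyz, _, patch_gt = extract_patch_for_next_level(
    batch_xyz, k=256, gt_xyz=gt_xyz, gt_k=1024, is_training=True)
# patch_xyz: [8, 256, 3], patch_gt: [8, 1024, 3]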
Example #22
def lfnet_module(kernel,
                 scale,
                 interp,
                 fit,
                 xyz,
                 points,
                 normals,
                 axis_x,
                 axis_y,
                 xyz_feature,
                 npoint,
                 radius_list,
                 nsample_list,
                 mlp_list,
                 is_training,
                 bn_decay,
                 scope,
                 mlp=[64, 64],
                 bn=True,
                 use_xyz=False,
                 weight=None,
                 knn=0,
                 d=1,
                 end=False,
                 use_xyz_feature=True,
                 first_layer=False):
    ''' A-CNN module with rings
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            normals: (batch_size, ndataset, 3) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radiuses (inner and outer) represent ring in local region
            nsample_list: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    # data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        # if npoint == xyz.get_shape().as_list()[1] and knn==0:
        #     raise Exception('wrong input knn and npoint')
        if npoint != xyz.get_shape().as_list()[1]:
            indices = farthest_point_sample(npoint, xyz)
            new_xyz = gather_point(xyz, indices)  # (batch_size, npoint, 3)
            new_normals = gather_point(normals,
                                       indices)  # (batch_size, npoint, 3)
            new_axis_x = gather_point(axis_x, indices)
            new_axis_y = gather_point(axis_y, indices)
        elif knn:
            new_xyz = xyz
            new_normals = normals
            new_axis_x = axis_x
            new_axis_y = axis_y
        else:
            indices = tf.range(npoint)
            indices = tf.tile(tf.expand_dims(indices, 0),
                              [xyz.get_shape().as_list()[0], 1])
            new_xyz = xyz
            new_normals = normals
            new_axis_x = axis_x
            new_axis_y = axis_y

        batch_size = xyz.get_shape()[0].value
        new_points_list = []

        for i in range(len(nsample_list)):
            radius = radius_list[i]
            print(radius)
            nsample = nsample_list[i]
            nk = kernel.get_shape().as_list()[0]
            kernel = kernel
            sita = scale
            if knn == 1:
                radius = 0

            _, idx = knn_point(nsample, xyz, new_xyz, d=d[i])

            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                                   [1, 1, nsample, 1])

            if weight is None:
                _, proj, _, kernel_out, weight, kernel_fit = transform_neighbors(
                    nsample, idx, xyz, new_xyz, new_normals, new_axis_x,
                    new_axis_y, kernel, sita, interp, fit, radius)
                proj = relative_pos_encoding(proj)
                if interp != 2:
                    # weight=tf.nn.softmax(weight,axis=-2)
                    weight = weight / tf.reduce_sum(
                        weight, axis=-2, keep_dims=True)
                weight = tf.expand_dims(weight, 3)
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = proj
            ########### addition xyz features
            if use_xyz_feature:
                if xyz_feature is None:
                    xyz_feature = proj
                else:
                    xyz_feature = group_point(xyz_feature, idx)
                edge_feature = proj

                edge_feature = tf_util.conv2d(edge_feature,
                                              mlp[0], [1, 1],
                                              padding='VALID',
                                              stride=[1, 1],
                                              bn=bn,
                                              is_training=is_training,
                                              scope='xyz_feature_%d' % (0),
                                              bn_decay=bn_decay)
                edge_feature = tf_util.conv2d(edge_feature,
                                              mlp[0], [1, 1],
                                              padding='VALID',
                                              stride=[1, 1],
                                              bn=bn,
                                              is_training=is_training,
                                              scope='xyz_feature_%d' % (1),
                                              bn_decay=bn_decay)
                output_feature = tf.concat([xyz_feature, edge_feature],
                                           axis=-1)
                if end == False:
                    xyz_feature = tf_util.conv2d(output_feature,
                                                 mlp[-1], [1, 1],
                                                 padding='VALID',
                                                 stride=[1, 1],
                                                 bn=bn,
                                                 is_training=is_training,
                                                 scope='xyz_feature2',
                                                 bn_decay=bn_decay)
                    # we can try sum and mean
                    xyz_feature = tf.reduce_max(xyz_feature,
                                                axis=[2],
                                                keep_dims=True,
                                                name='maxpool')
                    xyz_feature = tf.squeeze(xyz_feature, [2])
            if use_xyz_feature:
                grouped_points = tf.concat([grouped_points, output_feature],
                                           axis=-1)
            # ASFConv: TODO -- add a for loop here
            if first_layer:
                grouped_points = tf_util.conv2d(grouped_points,
                                                mlp_list[i][0], [1, 1],
                                                padding='VALID',
                                                stride=[1, 1],
                                                bn=bn,
                                                is_training=is_training,
                                                scope='conv%d_%d' % (i, 0),
                                                bn_decay=bn_decay)
            # Discrete Conv
            new_points = DiscreteConv(grouped_points, mlp_list, bn, i,
                                      is_training, bn_decay, weight, nk,
                                      kernel_fit)
            new_points_list.append(new_points)
        new_points = tf.concat(new_points_list, axis=-1)

        if first_layer:
            return new_xyz, new_points, new_normals, new_axis_x, new_axis_y, kernel_out, weight, kernel_fit, xyz_feature
        else:
            return new_xyz, new_points, new_normals, new_axis_x, new_axis_y, _, weight, _, xyz_feature
Example #23
File: model_utils.py | Project: zeta1999/3PU
    gt = []
    pc = []
    for b in range(4):
        gt.append(pc_util.load(gt_files[b])[np.newaxis, :, :3])
        pc.append(pc_util.load(pc_files[b])[np.newaxis, :, :3])

    import pdb
    pdb.set_trace()
    gt = np.concatenate(gt, axis=0)
    pc = np.concatenate(pc, axis=0)

    # fetcher = Fetcher(input_data, label, radius, batch_size=10,
    #     step_ratio=4, up_ratio=16, num_in_point=1024)
    gt = tf.constant(gt, dtype=tf.float32)
    pred = tf.constant(pc, dtype=tf.float32)
    # covariance matrix
    _, idx = knn_point(5, gt, pred)
    # [B, P, k, 3]
    grouped = tf.gather_nd(gt, idx)
    # B, P, 1, 3 and B, P, 3, 3
    b, cov_mat = covariance_matrix(grouped)
    # eigenvalue non-decreasing B, P, 3  B, P, 3, 3
    e, v = tf.self_adjoint_eig(cov_mat)
    # normals B, P, 3
    if is_2D:
        normals = v[:, :, :, 1]
    else:
        normals = v[:, :, :, 0]
    normals = tf.nn.l2_normalize(normals, axis=-1)

    rel_pred = pred - tf.squeeze(b, axis=2)
    # projection B, P
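The fragment above estimates normals as eigenvectors of local covariance matrices. A small self-contained NumPy check of the underlying idea on a synthetic planar neighborhood (this uses np.cov directly and has no relation to the covariance_matrix helper in the source):

import numpy as np

rng = np.random.default_rng(0)
# neighbors scattered on the z = 0 plane plus a little noise
neigh = np.column_stack([rng.uniform(-1, 1, 50),
                         rng.uniform(-1, 1, 50),
                         rng.normal(0, 0.01, 50)])

cov = np.cov(neigh, rowvar=False)      # 3x3 covariance of the neighborhood
evals, evecs = np.linalg.eigh(cov)     # eigenvalues in ascending order
normal = evecs[:, 0]                   # eigenvector of the smallest eigenvalue
print(np.abs(normal))                  # approximately [0, 0, 1], the plane normal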
Example #24
def pool(xyz, points, k, npoint):
    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
    _, idx = knn_point(k, xyz, new_xyz)
    new_points = tf.reduce_max(group_point(points, idx), axis=2)

    return new_xyz, new_points
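A hedged usage sketch for pool, again assuming the compiled FPS / kNN / grouping ops behind it; sizes are illustrative:

import tensorflow as tf

xyz = tf.placeholder(tf.float32, [16, 2048, 3])
feats = tf.placeholder(tf.float32, [16, 2048, 64])

# downsample to 512 FPS centroids, max-pooling features over the 16 nearest neighbors
new_xyz, new_feats = pool(xyz, feats, k=16, npoint=512)
# new_xyz: [16, 512, 3], new_feats: [16, 512, 64]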