Example #1
    def visualize(self, input_folder=None, save_path=None):
        _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
        logger.info(restore_model_path, bold=True)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        with tf.Session(config=config) as sess:
            tf_util.optimistic_restore(sess, restore_model_path)
            samples = glob(input_folder, recursive=True)
            samples.sort()
            if len(samples) > 100:
                samples = samples[:100]
            for point_path in samples:
                start = time.time()
                data = pc_util.load(point_path, count=NUM_SHAPE_POINT)
                num_shape_point = data.shape[0]
                data = data[:, 0:3]
                is_2D = np.all(data[:, 2] == 0)
                data, centroid, furthest_distance = pc_util.normalize_point_cloud(
                    data)
                if FLAGS.drop_out < 1:
                    idx = farthest_point_sample(
                        int(num_shape_point * FLAGS.drop_out),
                        data[np.newaxis, ...]).eval()[0]
                    data = data[idx, 0:3]
                if JITTER:
                    data = pc_util.jitter_perturbation_point_cloud(
                        data[np.newaxis, ...],
                        sigma=FLAGS.jitter_sigma,
                        clip=FLAGS.jitter_max,
                        is_2D=is_2D)
                    data = data[0, ...]
                ## get the edge information
                logger.info(os.path.basename(point_path))
                mid = data[(np.abs(data[:, 2]) < np.amax(data[:, 2]) * 0.2), :]
                idx = farthest_point_sample(5, mid[np.newaxis, ...]).eval()[0]
                # idx = np.random.choice(data.shape[0], 5, replace=False)
                patches = pc_util.extract_knn_patch(mid[idx, :], data,
                                                    NUM_POINT)
                end = time.time()
                print("total time: ", end - start)
                path = os.path.join(save_path,
                                    point_path.split('/')[-1][:-4] + '.ply')
                total_levels = int(np.log2(UP_RATIO) / np.log2(STEP_RATIO))
                for p in range(patches.shape[0]):
                    patch = patches[p]
                    for i in range(1, total_levels + 1):
                        patch_result = self.patch_prediction(
                            patch, sess, STEP_RATIO**i)
                        pc_util.save_ply(
                            (patch_result * furthest_distance) + centroid,
                            path[:-4] + "_p_%d_%d.ply" % (p, i))
                    pc_util.save_ply((patch * furthest_distance) + centroid,
                                     path[:-4] + "_p_%d_%d.ply" % (p, 0))
                pc_util.save_ply((data * furthest_distance) + centroid,
                                 path[:-4] + "_input.ply")
Example #2
def extract_patches(batch_xyz, k,patch_num=1,batch_features=None, gt_xyz=None, gt_k=None, is_training=None):
    """
    :param batch_xyz [B, P, 3]
    """
    batch_size, num_point, _ = batch_xyz.shape.as_list()
    with tf.name_scope("extract_input"):
        if is_training:
            use_random = False
            if patch_num>1:
                batch_seed_point = gather_point(batch_xyz, farthest_point_sample(patch_num, batch_xyz))
            else:
                # B, 1, 3
                idx = tf.random_uniform([batch_size, patch_num], minval=0, maxval=num_point, dtype=tf.int32)
                # idx = tf.constant(250, shape=[batch_size, 1], dtype=tf.int32)
                batch_seed_point = gather_point(batch_xyz, idx)
                #patch_num = 1
        else:
            assert(batch_size == 1)
            # remove residual, (B P 1) and (B, P, 1, 2)
            closest_d, _ = knn_point_2(2, batch_xyz, batch_xyz, unique=False)
            closest_d = closest_d[:, :, 1:]
            # (B, P)
            mask = tf.squeeze(closest_d < 5*(tf.reduce_mean(closest_d, axis=1, keepdims=True)), axis=-1)
            # filter (B, P', 3)
            batch_xyz = tf.expand_dims(tf.boolean_mask(batch_xyz, mask), axis=0)
            # batch_xyz = tf.Print(batch_xyz, [tf.shape(batch_xyz)])
            # B, M, 3
            # batch_seed_point = batch_xyz[:, -1:, :]
            # patch_num = 1
            patch_num = int(num_point / k * 5)
            # idx = tf.random_uniform([batch_size, patch_num], minval=0, maxval=num_point, dtype=tf.int32)
            idx = tf.squeeze(farthest_point_sample(patch_num, batch_xyz), axis=0)
            # idx = tf.random_uniform([patch_num], minval=0, maxval=tf.shape(batch_xyz)[1], dtype=tf.int32)
            # B, P, 3 -> B, k, 3 (idx B, k, 1)
            # idx = tf.Print(idx, [idx], message="idx")
            batch_seed_point = tf.gather(batch_xyz, idx, axis=1)
            k = tf.minimum(k, tf.shape(batch_xyz)[1])
            # batch_seed_point = gather_point(batch_xyz, idx)
        # B, M, k, 2
        _, new_patch_idx = knn_point_2(k, batch_xyz, batch_seed_point, unique=False)
        # B, M, k, 3
        batch_xyz = tf.gather_nd(batch_xyz, new_patch_idx)
        # MB, k, 3
        batch_xyz = tf.concat(tf.unstack(batch_xyz, axis=1), axis=0)
    if batch_features is not None:
        with tf.name_scope("extract_feature"):
            batch_features = tf.gather_nd(batch_features, new_patch_idx)
            batch_features = tf.concat(tf.unstack(batch_features, axis=1), axis=0)
    if is_training and (gt_xyz is not None and gt_k is not None):
        with tf.name_scope("extract_gt"):
            _, new_patch_idx = knn_point_2(gt_k, gt_xyz, batch_seed_point, unique=False)
            gt_xyz = tf.gather_nd(gt_xyz, new_patch_idx)
            gt_xyz = tf.concat(tf.unstack(gt_xyz, axis=1), axis=0)
    else:
        gt_xyz = None

    return batch_xyz, batch_features, gt_xyz
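
Several snippets here flatten the patch axis with tf.concat(tf.unstack(x, axis=1), axis=0) so that every extracted patch becomes its own batch element (the "MB, k, 3" comment above). A small NumPy check of the equivalent transpose-and-reshape, for intuition only:

import numpy as np

B, M, k = 2, 4, 16
x = np.random.rand(B, M, k, 3)
flat = np.concatenate([x[:, m] for m in range(M)], axis=0)          # (M*B, k, 3)
assert np.allclose(flat, np.transpose(x, (1, 0, 2, 3)).reshape(M * B, k, 3))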
Example #3
    def pc_prediction(self, pc):
        ## get patch seeds from farthest point sampling
        points = tf.convert_to_tensor(np.expand_dims(pc, axis=0),
                                      dtype=tf.float32)
        start = time()
        print('------------------patch_num_point:', self.opts.patch_num_point)
        seed1_num = int(pc.shape[0] / self.opts.patch_num_point *
                        self.opts.patch_num_ratio)

        ## FPS sampling
        seed = farthest_point_sample(seed1_num, points).eval()[0]
        seed_list = seed[:seed1_num]
        print("farthest distance sampling cost", time() - start)
        print("number of patches: %d" % len(seed_list))
        input_list = []
        up_point_list = []

        patches = pc_util.extract_knn_patch(pc[np.asarray(seed_list), :], pc,
                                            self.opts.patch_num_point)

        for point in tqdm(patches, total=len(patches)):
            up_point = self.patch_prediction(point)
            up_point = np.squeeze(up_point, axis=0)
            input_list.append(point)
            up_point_list.append(up_point)

        return input_list, up_point_list
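
For scale: with a 20,000-point cloud, patch_num_point = 256 and patch_num_ratio = 3 (assumed values, chosen only for the arithmetic), the seeding above yields about 234 overlapping patches, so each input point is covered roughly three times:

num_shape_point, patch_num_point, patch_num_ratio = 20000, 256, 3
seed1_num = int(num_shape_point / patch_num_point * patch_num_ratio)
print(seed1_num)   # 234 patches of 256 points each, ~3x coverage of the cloud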
Example #4
def pc_prediction_eval(pc, sess, ops, args, patch_num_ratio, ratio):
    num_shape_point = pc.shape[0]
    ## FPS sampling
    points = tf.convert_to_tensor(np.expand_dims(pc, axis=0), dtype=tf.float32)
    seed1_num = int(num_shape_point / args.num_point * args.patch_num_ratio)
    seed = farthest_point_sample(seed1_num, points).eval()[0]
    seed_list = seed[:seed1_num]

    print("number of patches: %d" % len(seed_list))
    input_list = []
    up_point2_list = []
    up_point1_list = []

    patches = pc_util.extract_knn_patch(pc[np.asarray(seed_list), :], pc,
                                        args.num_point)
    for i in tqdm(range(len(patches))):
        point = patches[i]
        up_point2, up_point1 = patch_prediction_eval(point, sess, ops, args,
                                                     ratio)

        input_list.append(point)
        up_point2_list.append(up_point2)
        up_point1_list.append(up_point1)

    return input_list, up_point2_list, up_point1_list
Example #5
def get_uniform_loss(pcd,
                     percentages=[0.004, 0.006, 0.008, 0.010, 0.012],
                     radius=1.0):
    B, N, C = pcd.get_shape().as_list()
    npoint = int(N * 0.05)
    loss = []
    for p in percentages:
        nsample = int(N * p)
        r = math.sqrt(p * radius)
        disk_area = math.pi * (radius**2) * p / nsample
        #print(npoint,nsample)
        new_xyz = gather_point(pcd, farthest_point_sample(
            npoint, pcd))  # (batch_size, npoint, 3)
        idx, pts_cnt = query_ball_point(
            r, nsample, pcd, new_xyz)  #(batch_size, npoint, nsample)

        #expect_len =  tf.sqrt(2*disk_area/1.732)#using hexagon
        expect_len = tf.sqrt(disk_area)  # using square

        grouped_pcd = group_point(pcd, idx)
        grouped_pcd = tf.concat(tf.unstack(grouped_pcd, axis=1), axis=0)

        var, _ = knn_point(2, grouped_pcd, grouped_pcd)
        uniform_dis = -var[:, :, 1:]
        uniform_dis = tf.sqrt(tf.abs(uniform_dis + 1e-8))
        uniform_dis = tf.reduce_mean(uniform_dis, axis=[-1])
        uniform_dis = tf.square(uniform_dis - expect_len) / (expect_len + 1e-8)
        uniform_dis = tf.reshape(uniform_dis, [-1])

        mean, variance = tf.nn.moments(uniform_dis, axes=0)
        mean = mean * math.pow(p * 100, 2)
        #nothing 4
        loss.append(mean)
    return tf.add_n(loss) / len(percentages)
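
For reference, with radius = 1.0, an assumed N = 4096 and the smallest percentage p = 0.004, the loss above queries nsample = 16 neighbours inside balls of radius ~0.063 and expects an average square-cell spacing expect_len ~0.028. A quick arithmetic check (not code from the repository):

import math

N, radius, p = 4096, 1.0, 0.004
nsample = int(N * p)                                            # 16 points expected per ball
r = math.sqrt(p * radius)                                       # ball radius ~= 0.063
expect_len = math.sqrt(math.pi * (radius ** 2) * p / nsample)   # ~= 0.028
print(nsample, round(r, 3), round(expect_len, 3))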
Example #6
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))  # (batch_size, npoint, 3)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
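
A hedged call sketch for the grouping layer above, assuming the compiled pointnet2 ops (gather_point, query_ball_point, group_point) are importable and TF 1.x as in the other examples; the placeholder shapes are arbitrary:

import tensorflow as tf

xyz = tf.placeholder(tf.float32, [None, 1024, 3])     # [B, N, 3] coordinates
points = tf.placeholder(tf.float32, [None, 1024, 64])  # [B, N, C] per-point features
new_xyz, new_points, idx, grouped_xyz = sample_and_group(
    npoint=512, radius=0.2, nsample=32,
    xyz=xyz, points=points, knn=False, use_xyz=True)
# new_xyz:    [B, 512, 3]       FPS centroids
# new_points: [B, 512, 32, 67]  centroid-normalized XYZ concatenated with features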
Example #7
def set_abstraction_msg(xyz, points, npoint, radius_list, nsample_list,
                        mlp_list, is_training, use_nchw):
    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
    new_points_list = []
    for i in range(len(radius_list)):
        radius = radius_list[i]
        nsample = nsample_list[i]
        group_idx = query_ball_point(radius, nsample, xyz, new_xyz)
        grouped_xyz = group_point(xyz, group_idx[0])
        grouped_xyz -= K.tile(
            Lambda(lambda x: K.expand_dims(x, axis=2))(new_xyz),
            [1, 1, nsample, 1])
        if points is not None:
            grouped_points = group_point(points, group_idx[0])
            grouped_points = Lambda(lambda x: K.concatenate(x, axis=-1))(
                [grouped_points, grouped_xyz])
        else:
            grouped_points = grouped_xyz
        if use_nchw:
            grouped_points = Lambda(lambda x: K.permute_dimensions(
                x, [0, 3, 1, 2]))(grouped_points)
        for j, num_out_channel in enumerate(mlp_list[i]):
            grouped_points = Conv2D(num_out_channel, 1,
                                    activation="relu")(grouped_points)
            grouped_points = BatchNormalization()(grouped_points,
                                                  training=is_training)
        if use_nchw:
            grouped_points = Lambda(lambda x: K.permute_dimensions(
                x, [0, 2, 3, 1]))(grouped_points)
        new_points = Lambda(lambda x: K.max(x, axis=2))(grouped_points)
        new_points_list.append(new_points)
    new_points_concat = Lambda(lambda x: K.concatenate(x, axis=-1))(
        new_points_list)
    return new_xyz, new_points_concat
Example #8
def farthest_point_sample(points, npoints):
    fps_indices = tf_sampling.farthest_point_sample(npoints, points)
    batch_indices = tf.tile(
        tf.reshape(tf.range(tf.shape(points)[0]), (-1, 1, 1)), (1, npoints, 1))
    indices = tf.concat(
        [batch_indices, tf.expand_dims(fps_indices, -1)], axis=-1)

    return indices
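
The wrapper above stacks a batch index next to each FPS index, so the result is consumed with tf.gather_nd rather than gather_point. A short usage sketch (assumes the compiled tf_sampling op and TF 1.x, as in the other examples):

import tensorflow as tf

points = tf.placeholder(tf.float32, [None, 2048, 3])   # [B, N, 3]
indices = farthest_point_sample(points, 512)            # [B, 512, 2] = (batch idx, point idx)
sampled_xyz = tf.gather_nd(points, indices)              # [B, 512, 3]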
Example #9
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    '''
    input data: xyz.shape[0] (N)*(d+C);
    a set of centroids: npoint (N')*d
    neighbors: nsample (K)*(d+C)
    farthest_point_sample output npoint's index.
    gather_point: output npoint 's data according to index and input data
    '''
    # aaa = farthest_point_sample(npoint, xyz)
    new_xyz = gather_point(xyz, farthest_point_sample(
        npoint, xyz))  # (batch_size, npoint, 3)
    # print('xys:', new_xyz.get_shape())
    # print('new_xyz in s g:', new_xyz.get_shape(), 'npoint:', npoint)

    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        # K' is flexible but at most nsample; not specified in the paper
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                           [1, 1, nsample, 1])  # translation normalization
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #10
def pointnet_sa_module_msg(xyz,
                           points,
                           npoint,
                           radius_list,
                           nsample_list,
                           mlp_list,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True,
                           ibn=False,
                           use_xyz=True):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: list of float32 -- search radius in local region
            nsample: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.expand_dims(new_xyz, 2)
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util2.conv2d(grouped_points,
                                                 num_out_channel, [1, 1],
                                                 padding='VALID',
                                                 stride=[1, 1],
                                                 bn=bn,
                                                 ibn=ibn,
                                                 is_training=is_training,
                                                 scope='conv%d_%d' % (i, j),
                                                 bn_decay=bn_decay)
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
Example #11
def subsample(points, feat, targetnum, kp_idx):
    if kp_idx is not None:
        kp_indices = kp_idx
    else:
        kp_indices = farthest_point_sample(targetnum, points)
        kp_indices = tf.expand_dims(kp_indices, 2)
    feat_sampled = group_point(feat, kp_indices)
    feat_sampled = tf.squeeze(feat_sampled, 2)
    xyz_sampled = group_point(points, kp_indices)
    xyz_sampled = tf.squeeze(xyz_sampled, 2)
    return xyz_sampled, feat_sampled, kp_indices
Example #12
    def test(self):

        self.inputs = tf.placeholder(tf.float32,
                                     shape=[1, self.opts.patch_num_point, 3])
        is_training = tf.placeholder_with_default(False,
                                                  shape=[],
                                                  name='is_training')
        Gen = Generator(self.opts, is_training, name='generator')
        self.pred_pc = Gen(self.inputs)
        for i in range(round(math.pow(self.opts.up_ratio, 1 / 4)) - 1):
            self.pred_pc = Gen(self.pred_pc)

        saver = tf.train.Saver()
        print("****** phrase test ******")

        ##restore_epoch, checkpoint_path = model_utils.pre_load_checkpoint(self.opts.log_dir)
        ## to use the pretrained model below, keep the line above commented out
        checkpoint_path = "/home/alitokur/Softwares/PU-GAN/model/model-100"
        print(checkpoint_path)
        saver.restore(self.sess, checkpoint_path)

        samples = glob(self.opts.test_data)
        point = pc_util.load(samples[0])
        self.opts.num_point = point.shape[0]
        out_point_num = int(self.opts.num_point * self.opts.up_ratio)

        for point_path in samples:
            logging.info(point_path)
            start = time()
            pc = pc_util.load(point_path)[:, :3]
            pc, centroid, furthest_distance = pc_util.normalize_point_cloud(pc)

            if self.opts.jitter:
                pc = pc_util.jitter_perturbation_point_cloud(
                    pc[np.newaxis, ...],
                    sigma=self.opts.jitter_sigma,
                    clip=self.opts.jitter_max)
                pc = pc[0, ...]

            input_list, pred_list = self.pc_prediction(pc)

            end = time()
            print("total time: ", end - start)
            pred_pc = np.concatenate(pred_list, axis=0)
            pred_pc = (pred_pc * furthest_distance) + centroid

            pred_pc = np.reshape(pred_pc, [-1, 3])
            path = os.path.join(self.opts.out_folder,
                                point_path.split('/')[-1][:-4] + '.ply')
            idx = farthest_point_sample(out_point_num, pred_pc[np.newaxis,
                                                               ...]).eval()[0]
            pred_pc = pred_pc[idx, 0:3]
            np.savetxt(path[:-4] + '.xyz', pred_pc, fmt='%.6f')
Example #13
    def pc_prediction(self, gm, sess, patch_num_ratio=3, edge_threshold=0.05):
        ## get patch seeds from farthest point sampling
        points = tf.convert_to_tensor(np.expand_dims(gm.data,axis=0),dtype=tf.float32)
        start= time.time()
        seed1_num = int(gm.data.shape[0] / (NUM_POINT/2) * patch_num_ratio)

        ## FPS sampling
        seed = farthest_point_sample(seed1_num*2, points).eval()[0]
        seed_list = seed[:seed1_num]
        print "farthest distance sampling cost", time.time() - start
        ratios = np.random.uniform(1.0,1.0,size=[seed1_num])

        input_list = []
        up_point_list=[]
        up_edge_list = []
        up_edgedist_list = []
        fail = 0
        for seed,ratio in tqdm(zip(seed_list,ratios)):
            try:
                patch_size = int(NUM_POINT * ratio)
                idx = np.asarray(gm.bfs_knn(seed,patch_size))
                # idx = np.asarray(gm.geodesic_knn(seed,patch_size))
                if len(idx)<NUM_POINT:
                    fail = fail + 1
                    continue
                idx1 = np.random.permutation(idx.shape[0])[:NUM_POINT]
                idx1.sort()
                idx = idx[idx1]
                point = gm.data[idx]
            except:
                fail= fail+1
                continue
            up_point,up_edgepoint,up_edgedist = self.patch_prediction(point, sess,ratio,edge_threshold)

            input_list.append(point)
            up_point_list.append(up_point)
            up_edge_list.append(up_edgepoint)
            up_edgedist_list.append(up_edgedist)
        print "total %d fails" % fail

        input = np.concatenate(input_list,axis=0)
        pred = np.concatenate(up_point_list,axis=0)

        pred_edge = np.concatenate(up_edge_list, axis=0)
        print "total %d edgepoint" % pred_edge.shape[0]
        pred_edgedist = np.concatenate(up_edgedist_list,axis=0)
        rgba = data_provider.convert_dist2rgba(pred_edgedist, scale=10)
        pred_edge = np.hstack((pred_edge, rgba, pred_edgedist.reshape(-1, 1)))

        return input, pred, pred_edge
Example #14
def sample_and_group(npoint, radius, nsample, xyz, points, tnet_spec=None, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        if np.isscalar(radius):
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            tf.summary.histogram('pts_cnt', pts_cnt)
        else:
            idx_list = []
            for radius_one, xyz_one, new_xyz_one in zip(tf.unstack(radius,axis=0), tf.unstack(xyz, axis=0),tf.unstack(new_xyz, axis=0)):
                idx_one, pts_cnt = query_ball_point(radius_one, nsample, tf.expand_dims(xyz_one, axis=0), tf.expand_dims(new_xyz_one, axis=0))
                idx_list.append(idx_one)
            idx = tf.stack(idx_list, axis=0)
            idx = tf.squeeze(idx, axis=1)

    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            # new_points = tf.concat([grouped_xyz, tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]),grouped_points], axis=-1) # (batch_size, npoint, nample, 3+channel)
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        # new_points =  tf.concat([grouped_xyz, tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])], axis=-1)
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #15
def sample_points(xyz, npoint):
    '''
    :param xyz:
    :param npoint:
    :param knn:
    :param use_xyz:
    :return: new_xyz - Cluster centers
    '''

    if npoint <= 0:
        new_xyz = tf.identity(xyz)
    else:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))

    return new_xyz
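
Usage is symmetric for downsampling or pass-through; a two-line sketch, assuming the compiled ops and an `xyz` tensor of shape [B, N, 3]:

centers = sample_points(xyz, npoint=256)   # [B, 256, 3] FPS cluster centres
same = sample_points(xyz, npoint=0)        # npoint <= 0 returns xyz unchanged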
Example #16
def eval_whole_model(sess, ops, args, up_ratio, path_input, path_output):
    # get necessary parameters
    num_point = args.num_point
    num_shape_point = args.num_shape_point
    patch_num_ratio = 3

    if not os.path.exists(path_output):
        os.makedirs(path_output)

    # obtain xyz file from path_input
    pcs_input = glob(path_input + "/*.xyz")
    num_pcs = len(pcs_input)
    print('total obj %d' % num_pcs)

    for i, path_input_xyz in enumerate(pcs_input):
        pc_input = np.loadtxt(path_input_xyz)
        name_obj = path_input_xyz.split('/')[-1]  # with .xyz
        pc_input = pc_input[:, 0:3]
        pc_input_normed, centroid, scale = pc_util.normalize_point_cloud(
            pc_input)

        # obtain patch prediction
        input_list, pred2_list, pred1_list = pc_prediction_eval(
            pc_input_normed,
            sess,
            ops,
            args,
            patch_num_ratio=patch_num_ratio,
            ratio=up_ratio)

        # formulate patch prediction to full model by fps
        pred2_normed = np.concatenate(pred2_list, axis=0)
        idx = farthest_point_sample(num_shape_point * up_ratio,
                                    pred2_normed[np.newaxis, ...]).eval()[0]
        pred2_normed = pred2_normed[idx, 0:3]
        pred2 = (pred2_normed * scale) + centroid

        # save xyz
        save_path = os.path.join(path_output, 'input_' + name_obj)
        np.savetxt(save_path, pc_input)
        save_path = os.path.join(path_output, 'pred_' + name_obj)
        np.savetxt(save_path, pred2)
Example #17
    def __call__(self, inputs):
        with tf.variable_scope(self.name, reuse=self.reuse):

            features = ops.feature_extraction(inputs,
                                              scope='feature_extraction',
                                              is_training=self.is_training,
                                              bn_decay=None)

            H = ops.up_projection_unit(features,
                                       self.up_ratio_real,
                                       scope="up_projection_unit",
                                       is_training=self.is_training,
                                       bn_decay=None)

            coord = ops.conv2d(H,
                               64, [1, 1],
                               padding='VALID',
                               stride=[1, 1],
                               bn=False,
                               is_training=self.is_training,
                               scope='fc_layer1',
                               bn_decay=None)

            coord = ops.conv2d(coord,
                               3, [1, 1],
                               padding='VALID',
                               stride=[1, 1],
                               bn=False,
                               is_training=self.is_training,
                               scope='fc_layer2',
                               bn_decay=None,
                               activation_fn=None,
                               weight_decay=0.0)
            outputs = tf.squeeze(coord, [2])

            outputs = gather_point(
                outputs, farthest_point_sample(self.out_num_point, outputs))
        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           self.name)
        return outputs
Example #18
def get_uniform_loss2(
        pcd,
        percentages=[0.002, 0.004, 0.006, 0.008, 0.010, 0.012, 0.015],
        radius=1.0):
    B, N, C = pcd.get_shape().as_list()
    npoint = int(N * 0.05)
    loss = []
    for p in percentages:
        nsample = int(N * p)
        r = math.sqrt(p * radius)
        #print(npoint,nsample)
        new_xyz = gather_point(pcd, farthest_point_sample(
            npoint, pcd))  # (batch_size, npoint, 3)
        idx, pts_cnt = query_ball_point(
            r, nsample, pcd, new_xyz)  #(batch_size, npoint, nsample)

        uniform_val = tf.py_func(py_uniform_loss, [pcd, idx, pts_cnt, r],
                                 tf.float32)

        loss.append(uniform_val * math.sqrt(p * 100))
    return tf.add_n(loss) / len(percentages)
Example #19
    def pc_prediction(self,
                      gm,
                      sess,
                      patch_num_ratio=3,
                      edge_threshold=0.05,
                      edge=None):
        ## get patch seeds from farthest point sampling
        points = tf.convert_to_tensor(np.expand_dims(gm.data, axis=0),
                                      dtype=tf.float32)
        start = time.time()
        seed1_num = int(gm.data.shape[0] / (NUM_POINT / 8) * patch_num_ratio)

        ## FPS sampling
        seed = farthest_point_sample(seed1_num * 2, points).eval()[0]
        seed_list = seed[:seed1_num]
        print "farthest distance sampling cost", time.time() - start

        if edge is None:
            ratios = np.random.uniform(1.0, 1.0, size=[seed1_num])
        else:
            edge_tree = spatial.cKDTree(edge)
            seed_data = gm.data[np.asarray(seed_list)]
            seed_tree = spatial.cKDTree(seed_data)
            indics = seed_tree.query_ball_tree(edge_tree, r=0.02)
            ratios = []
            cnt = 0
            for item in indics:
                if len(item) >= 3:
                    #ratios.append(np.random.uniform(1.0,2.0))
                    ratios.append(1.0)
                    cnt = cnt + 1
                else:
                    # ratios.append(np.random.uniform(1.0,3.0))
                    ratios.append(3.0)
            print "total %d edge patch" % (cnt)
        ######
        mm1 = {}
        mm2 = {}
        mm3 = {}
        # for i in range(gm.data.shape[0]):
        for i in range(100):
            mm1[i] = []
            mm2[i] = []
            mm3[i] = []
        ######
        input_list = []
        up_point_list = []
        up_edge_list = []
        up_edgedist_list = []
        fail = 0
        for seed, ratio in tqdm(zip(seed_list, ratios)):
            try:
                patch_size = int(NUM_POINT * ratio)
                idx = np.asarray(gm.bfs_knn(seed, patch_size))
                if len(idx) < NUM_POINT:
                    fail = fail + 1
                    continue
                idx1 = np.random.permutation(idx.shape[0])[:NUM_POINT]
                idx1.sort()
                idx = idx[idx1]
                point = gm.data[idx]
            except:
                fail = fail + 1
                continue
            up_point, up_edgepoint, up_edgedist = self.patch_prediction(
                point, sess, ratio, edge_threshold)

            # ## handle with the points of same point
            # for cnt, item in enumerate(idx[:128]):
            #     if item <10000:
            #         mm1[item].append(up_point[cnt])
            #         mm2[item].append(up_point[cnt+128])
            #         mm3[item].append(up_point[cnt+128*2])
            #         # mm[item].append(up_point[cnt+128*3])
            # ########
            input_list.append(point)
            up_point_list.append(up_point)
            up_edge_list.append(up_edgepoint)
            up_edgedist_list.append(up_edgedist)
        print "total %d fails" % fail

        # ##
        # colors = np.random.randint(0,255,(10000,3))
        # color_point = []
        # for item in mm1.keys():
        #     aa = np.asarray(mm1[item])
        #     if len(aa)==0:
        #         continue
        #     aa = np.concatenate([aa,np.tile(colors[item],(len(aa),1))],axis=-1)
        #     color_point.extend(aa)
        # color_point = np.asarray(color_point)
        # data_provider.save_xyz('/home/lqyu/server/proj49/PointSR2/'+point_path.split('/')[-1][:-4] +'1.txt',color_point)
        #
        # color_point = []
        # for item in mm2.keys():
        #     aa = np.asarray(mm2[item])
        #     if len(aa) == 0:
        #         continue
        #     aa = np.concatenate([aa, np.tile(colors[item], (len(aa), 1))], axis=-1)
        #     color_point.extend(aa)
        # color_point = np.asarray(color_point)
        # data_provider.save_xyz('/home/lqyu/server/proj49/PointSR2/'+point_path.split('/')[-1][:-4] +'2.txt', color_point)
        #
        # color_point = []
        # for item in mm3.keys():
        #     aa = np.asarray(mm3[item])
        #     if len(aa) == 0:
        #         continue
        #     aa = np.concatenate([aa, np.tile(colors[item], (len(aa), 1))], axis=-1)
        #     color_point.extend(aa)
        # color_point = np.asarray(color_point)
        # data_provider.save_xyz('/home/lqyu/server/proj49/PointSR2/'+point_path.split('/')[-1][:-4] +'3.txt', color_point)
        # ##

        input = np.concatenate(input_list, axis=0)
        pred = np.concatenate(up_point_list, axis=0)

        pred_edge = np.concatenate(up_edge_list, axis=0)
        print "total %d edgepoint" % pred_edge.shape[0]
        pred_edgedist = np.concatenate(up_edgedist_list, axis=0)
        rgba = data_provider.convert_dist2rgba(pred_edgedist, scale=10)
        pred_edge = np.hstack((pred_edge, rgba, pred_edgedist.reshape(-1, 1)))

        return input, pred, pred_edge
Example #20
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     tnet_spec=None,
                     knn=False,
                     use_xyz=True,
                     keypoints=None,
                     orientations=None,
                     normalize_radius=False):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        keypoints: None or tensor with shape [None, None, 3], containing the xyz of keypoints.
                   If provided, npoint will be ignored, and iterative furthest sampling will be skipped
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor, i.e. cluster center (dim=3)
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor (dim=3+c, first 3 dimensions are normalized XYZ)
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions. This is usually the first 3 dimensions of new_points
    '''

    end_points = {}

    if keypoints is not None:
        new_xyz = keypoints
    else:
        new_xyz = gather_point(xyz, farthest_point_sample(
            npoint, xyz))  # (batch_size, npoint, 3)

    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
        pts_cnt = nsample  # Hack. By right should make sure number of input points < nsample
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)

    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz = grouped_xyz - tf.tile(tf.expand_dims(
        new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if normalize_radius:
        grouped_xyz /= radius

    end_points['grouped_xyz_before'] = grouped_xyz

    # 2D-rotate via orientations if necessary
    if orientations is not None:
        cosval = tf.cos(orientations)
        sinval = tf.sin(orientations)
        one = tf.ones_like(cosval)
        zero = tf.zeros_like(cosval)
        R = tf.stack([(cosval, sinval, zero), (-sinval, cosval, zero),
                      (zero, zero, one)],
                     axis=0)
        R = tf.transpose(R, perm=[2, 3, 0, 1])
        grouped_xyz = tf.matmul(grouped_xyz, R)
        end_points['rotation'] = R

    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    end_points['grouped_xyz'] = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz, end_points
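
The orientation branch above builds one z-axis rotation per (batch, point) pair via the stack/transpose; a single-angle NumPy sketch of the same 3x3 layout (illustration only, not repository code):

import numpy as np

theta = 0.3
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, s, 0.0],
              [-s, c, 0.0],
              [0.0, 0.0, 1.0]])
p = np.array([1.0, 0.0, 0.5])
print(p @ R)   # xy components rotated in-plane, z left untouched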
Example #21
    def test_hierarical_prediction(self):
        data_folder = '../../PointSR_data/CAD_imperfect/simu_noise'
        phase = data_folder.split('/')[-2] + "_" + data_folder.split('/')[-1]
        save_path = os.path.join(MODEL_DIR, 'result/' + phase)
        self.saver = tf.train.Saver()
        _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
        print(restore_model_path)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        with tf.Session(config=config) as sess:
            self.saver.restore(sess, restore_model_path)
            total_time = 0
            samples = glob(data_folder + "/*.xyz")
            samples.sort()
            for point_path in samples:
                print(point_path)
                edge_path = point_path.replace('simu_noise', 'mesh_edge')
                edge_path = edge_path.replace('_noise.xyz', '_edge.xyz')
                gm = GKNN(point_path,
                          edge_path,
                          patch_size=NUM_POINT,
                          patch_num=30,
                          add_noise=False)
                l = gm.edge.shape[0]
                idxx = list(range(l)) * (NUM_EDGE // l) + list(
                    np.random.permutation(l)[:NUM_EDGE % l])
                edge = gm.edge[idxx]

                ## get patch seeds from farthest point sampling
                points = tf.convert_to_tensor(np.expand_dims(gm.data, axis=0),
                                              dtype=tf.float32)
                start = time.time()
                seed = farthest_point_sample(gm.data.shape[0] // 2,
                                             points).eval()[0]
                print "aaaaa", time.time() - start

                seed1_num = int(gm.data.shape[0] / NUM_POINT * 30)
                seed_list1 = seed[:seed1_num]
                seed_left = seed[seed1_num:]

                # seed2_num = int(gm.data.shape[0] / NUM_POINT * 1)
                # seed_list2 = gm.get_seed_fromdensity(seed2_num)
                # seed_list = np.concatenate([seed_list1, seed_list2])
                seed_list = np.unique(seed_list1)

                inputs = []
                up_point_list = []
                up_edge_list = []
                up_edgedist_list = []
                input_edge_list = []
                input_edgedist_list = []
                fail = 0
                for seed in tqdm(seed_list):
                    try:
                        patch_size = NUM_POINT * np.random.randint(1, 5)
                        point = gm.bfs_knn(seed, patch_size)
                        idx = np.random.permutation(patch_size)[:NUM_POINT]
                        idx.sort()
                        point = point[idx]
                    except:
                        fail = fail + 1
                        continue

                    #get the idx
                    idx1 = np.reshape(np.arange(NUM_POINT), [1, NUM_POINT])
                    idx0 = np.zeros((1, NUM_POINT))
                    idx = np.stack((idx0, idx1), axis=-1)

                    up_point, up_edgepoint, up_edgedist, input_edge, input_edgedist = self.patch_prediction(
                        point, edge, idx, sess)
                    inputs.append(point)
                    up_point_list.append(up_point)
                    up_edge_list.append(up_edgepoint)
                    up_edgedist_list.append(up_edgedist)
                    input_edge_list.append(input_edge)
                    input_edgedist_list.append(input_edgedist)
                print "total %d fails" % fail

                input = np.concatenate(inputs, axis=0)
                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_input.xyz")
                data_provider.save_xyz(path, gm.data)

                input_edge = np.concatenate(input_edge_list, axis=0)
                input_edgedist = np.concatenate(input_edgedist_list, axis=0)
                rgba = data_provider.convert_dist2rgba(input_edgedist,
                                                       scale=10)
                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_inputedge.ply")
                data_provider.save_ply(
                    path,
                    np.hstack((input_edge, rgba, input_edgedist.reshape(-1,
                                                                        1))))

                pred = np.concatenate(up_point_list, axis=0)
                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_output.xyz")
                data_provider.save_xyz(path, pred)

                pred_edge = np.concatenate(up_edge_list, axis=0)

                t1 = time.time()
                print "total %d edgepoint" % pred_edge.shape[0]
                edge_dist = np.zeros(pred_edge.shape[0])
                for sid in range(0, pred_edge.shape[0], 20000):
                    eid = np.minimum(pred_edge.shape[0], sid + 20000)
                    tf_point = tf.placeholder(tf.float32, [1, eid - sid, 3])
                    tf_edge = tf.placeholder(tf.float32,
                                             [1, gm.edge.shape[0], 6])
                    pred_edge_dist_tf = model_utils.distance_point2edge(
                        tf_point, tf_edge)
                    pred_edge_dist_tf = tf.sqrt(
                        tf.reduce_min(pred_edge_dist_tf, axis=-1))
                    edge_dist[sid:eid] = sess.run(pred_edge_dist_tf,
                                                  feed_dict={
                                                      tf_point:
                                                      np.expand_dims(
                                                          pred_edge[sid:eid],
                                                          axis=0),
                                                      tf_edge:
                                                      np.expand_dims(gm.edge,
                                                                     axis=0)
                                                  })
                t3 = time.time()
                print "tf time %f" % (t3 - t1)
                rgba = data_provider.convert_dist2rgba(edge_dist, scale=10)
                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_outputedgeerror.ply")
                data_provider.save_ply(
                    path, np.hstack((pred_edge, rgba, edge_dist.reshape(-1,
                                                                        1))))

                pred_edgedist = np.concatenate(up_edgedist_list, axis=0)
                rgba = data_provider.convert_dist2rgba(pred_edgedist, scale=10)
                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_outputedge.ply")
                data_provider.save_ply(
                    path,
                    np.hstack((pred_edge, rgba, pred_edgedist.reshape(-1, 1))))

            print(total_time / len(samples))
Example #22
    def completion(self, inputs, is_training):
        num_point = inputs.get_shape()[1].value
        l0_xyz = inputs[:,:,0:3]
        l0_points = None

        is_training = is_training
        bradius = 1.0
        use_bn = False
        use_ibn = False
        bn_decay = 0.95
        up_ratio = 8

        self.grid_size = 2
        self.num_coarse = int(num_point * up_ratio / 4)

        with tf.variable_scope('encoder_0', reuse=tf.AUTO_REUSE):
            l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=num_point,
                                                               radius=bradius * 0.05, bn=use_bn, ibn=use_ibn,
                                                               nsample=32, mlp=[32, 32, 64], mlp2=None, group_all=False,
                                                               is_training=is_training, bn_decay=bn_decay,
                                                               scope='layer1')

            l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=num_point // 2,
                                                               radius=bradius * 0.1, bn=use_bn, ibn=use_ibn,
                                                               nsample=32, mlp=[64, 64, 128], mlp2=None,
                                                               group_all=False,
                                                               is_training=is_training, bn_decay=bn_decay,
                                                               scope='layer2')

            l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=num_point // 4,
                                                               radius=bradius * 0.2, bn=use_bn, ibn=use_ibn,
                                                               nsample=32, mlp=[128, 128, 256], mlp2=None,
                                                               group_all=False,
                                                               is_training=is_training, bn_decay=bn_decay,
                                                               scope='layer3')

            l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=num_point // 8,
                                                               radius=bradius * 0.3, bn=use_bn, ibn=use_ibn,
                                                               nsample=32, mlp=[256, 256, 512], mlp2=None,
                                                               group_all=False,
                                                               is_training=is_training, bn_decay=bn_decay,
                                                               scope='layer4')

            l5_xyz, l5_points, l5_indices = pointnet_sa_module(l4_xyz, l4_points, npoint=num_point // 16,
                                                               radius=bradius * 0.4, bn=use_bn, ibn=use_ibn,
                                                               nsample=32, mlp=[512, 512, 1024], mlp2=None,
                                                               group_all=False,
                                                               is_training=is_training, bn_decay=bn_decay,
                                                               scope='layer5')

            gl_xyz, gl_points, gl_indices = pointnet_sa_module(l5_xyz, l5_points, npoint=1,
                                                               radius=bradius * 0.3, bn=use_bn, ibn=use_ibn,
                                                               nsample=32, mlp=[512, 512, 1024], mlp2=None,
                                                               group_all=True,
                                                               is_training=is_training, bn_decay=bn_decay,
                                                               scope='layer6')

            gl_feature = tf.reduce_max(gl_points, axis=1)

            print('gl_feature', gl_feature)

            # Feature Propagation layers

            up_gl_points = pointnet_fp_module(l0_xyz, gl_xyz, None, gl_points, [64], is_training, bn_decay,
                                              scope='fa_layer0', bn=use_bn, ibn=use_ibn)

            up_l5_points = pointnet_fp_module(l0_xyz, l5_xyz, None, l5_points, [64], is_training, bn_decay,
                                              scope='fa_layer1', bn=use_bn, ibn=use_ibn)

            up_l4_points = pointnet_fp_module(l0_xyz, l4_xyz, None, l4_points, [64], is_training, bn_decay,
                                              scope='fa_layer2', bn=use_bn, ibn=use_ibn)

            up_l3_points = pointnet_fp_module(l0_xyz, l3_xyz, None, l3_points, [64], is_training, bn_decay,
                                              scope='fa_layer3', bn=use_bn, ibn=use_ibn)

            up_l2_points = pointnet_fp_module(l0_xyz, l2_xyz, None, l2_points, [64], is_training, bn_decay,
                                              scope='fa_layer4', bn=use_bn, ibn=use_ibn)

            ###concat feature
        with tf.variable_scope('up_layer', reuse=tf.AUTO_REUSE):
            new_points_list = []
            for i in range(up_ratio):
                if i>3:
                    transform = input_transform_net(l0_xyz, is_training, bn_decay, K=3)
                    xyz_transformed = tf.matmul(l0_xyz, transform)

                    concat_feat = tf.concat([up_gl_points, up_gl_points-up_l5_points, up_gl_points-up_l4_points, up_gl_points-up_l3_points, up_gl_points-up_l2_points, up_gl_points-l1_points, xyz_transformed],
                                            axis=-1)
                    print('concat_feat1', concat_feat)
                else:
                    concat_feat = tf.concat([up_gl_points, up_l5_points, up_l4_points, up_l3_points, up_l2_points, l1_points, l0_xyz],
                                            axis=-1)
                    print('concat_feat2', concat_feat)
                #concat_feat = tf.concat([up_l4_points, up_l3_points, up_l2_points, l1_points, l0_xyz], axis=-1)
                concat_feat = tf.expand_dims(concat_feat, axis=2)
                concat_feat = tf_util2.conv2d(concat_feat, 256, [1, 1],
                                              padding='VALID', stride=[1, 1],
                                              bn=False, is_training=is_training,
                                              scope='fc_layer0_%d' % (i), bn_decay=bn_decay)

                new_points = tf_util2.conv2d(concat_feat, 128, [1, 1],
                                             padding='VALID', stride=[1, 1],
                                             bn=use_bn, is_training=is_training,
                                             scope='conv_%d' % (i),
                                             bn_decay=bn_decay)
                new_points_list.append(new_points)
            net = tf.concat(new_points_list, axis=1)

            coord_feature = tf_util2.conv2d(net, 64, [1, 1],
                                    padding='VALID', stride=[1, 1],
                                    bn=False, is_training=is_training,
                                    scope='fc_layer1', bn_decay=bn_decay)


            coord = tf_util2.conv2d(coord_feature, 3, [1, 1],
                                    padding='VALID', stride=[1, 1],
                                    bn=False, is_training=is_training,
                                    scope='fc_layer2', bn_decay=bn_decay,
                                    activation_fn=None, weight_decay=0.0)  # B*(2N)*1*3

            coarse_highres = tf.squeeze(coord, [2])  # B*(2N)*3
            coord_feature = tf.squeeze(coord_feature, [2])
            fps_idx = farthest_point_sample(int(self.num_fine) // 2, coarse_highres)
            coord_feature = gather_point(coord_feature, fps_idx)
            coarse_fps = gather_point(coarse_highres, fps_idx)

            coord_feature = tf.expand_dims(coord_feature, 2)

            print('coord_feature', coord, coord_feature)

            score = tf_util2.conv2d(coord_feature, 16, [1, 1],
                                    padding='VALID', stride=[1, 1],
                                    bn=False, is_training=is_training,
                                    scope='fc_layer3', bn_decay=bn_decay)

            score = tf_util2.conv2d(score, 8, [1, 1],
                                    padding='VALID', stride=[1, 1],
                                    bn=False, is_training=is_training,
                                    scope='fc_layer4', bn_decay=bn_decay)

            score = tf_util2.conv2d(score, 1, [1, 1],
                                    padding='VALID', stride=[1, 1],
                                    bn=False, is_training=is_training,
                                    scope='fc_layer5', bn_decay=bn_decay)

            score = tf.nn.softplus(score)
            score = tf.squeeze(score, [2,3])

            _, idx = tf.math.top_k(score, self.num_coarse)

            coarse = gather_point(coarse_fps, idx)

            coord_feature = tf.squeeze(coord_feature, [2])
            coord_feature = gather_point(coord_feature, idx)

            print('coarse', coord_feature, coarse)


        with tf.variable_scope('folding', reuse=tf.AUTO_REUSE):
            grid = tf.meshgrid(tf.linspace(-0.05, 0.05, self.grid_size), tf.linspace(-0.05, 0.05, self.grid_size))
            print('grid:', grid)
            grid = tf.expand_dims(tf.reshape(tf.stack(grid, axis=2), [-1, 2]), 0)
            print('grid:', grid)
            grid_feat = tf.tile(grid, [coarse.shape[0], self.num_coarse, 1])
            print('grid_feat', grid_feat)

            point_feat = tf.tile(tf.expand_dims(tf.concat([coarse, coord_feature], axis=-1), 2), [1, 1, self.grid_size ** 2, 1])
            point_feat = tf.reshape(point_feat, [coarse.shape[0], self.num_fine, -1])
            print('point_feat', point_feat)

            global_feat = tf.tile(tf.expand_dims(gl_feature, 1), [1, self.num_fine, 1])

            #print('global_feat', global_feat)

            feat = tf.concat([grid_feat, point_feat, global_feat], axis=2)
            print('feat:', feat)

            center = tf.tile(tf.expand_dims(coarse, 2), [1, 1, self.grid_size ** 2, 1])
            center = tf.reshape(center, [-1, self.num_fine, 3])

            print('center', center)

            fine = mlp_conv(feat, [512, 512, 3]) + center
            print('fine:', fine)

        return coarse_highres, coarse, fine
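
The folding stage above expands every coarse point into grid_size ** 2 = 4 fine points by tiling a small 2D offset grid; a NumPy sketch mirroring the tf.meshgrid/tf.stack/tf.reshape lines (illustration only):

import numpy as np

grid_size = 2
g = np.meshgrid(np.linspace(-0.05, 0.05, grid_size),
                np.linspace(-0.05, 0.05, grid_size))
grid = np.stack(g, axis=2).reshape(-1, 2)   # (4, 2): one xy offset per fine point
print(grid)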
Example #23
0
def pointnet_sa_module_msg(xyz,
                           points,
                           npoint,
                           radius_list,
                           nsample_list,
                           mlp_list,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True,
                           use_xyz=True,
                           use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radii of the local regions
            nsample_list: list of int32 -- how many points to group in each local region
            mlp_list: list of list of int32 -- output sizes of the MLP applied to each point, per scale
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp_list[k][-1]}) TF tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                                   [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 3, 1, 2])
            for j, num_out_channel in enumerate(mlp_list[i]):
                ####################################
                grouped_points = Ops.xxlu(Ops.conv2d(grouped_points,
                                                     k=(1, 1),
                                                     out_c=num_out_channel,
                                                     str=1,
                                                     pad='VALID',
                                                     name='lll_%d_%d' % (i, j)),
                                          label='lrelu')
                #grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1,1],
                #padding='VALID', stride=[1,1], bn=bn, is_training=is_training,scope='conv%d_%d'%(i,j), bn_decay=bn_decay)

            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 2, 3, 1])
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
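A hypothetical call to the module above, mirroring common PointNet++ MSG settings; the placeholder shapes and the radius/nsample/mlp values are assumptions for illustration, not taken from this repository.

xyz_pl = tf.placeholder(tf.float32, shape=(8, 1024, 3))   # (batch, ndataset, 3)
is_training_pl = tf.placeholder(tf.bool, shape=())

new_xyz, new_points = pointnet_sa_module_msg(
    xyz_pl, points=None, npoint=512,
    radius_list=[0.1, 0.2, 0.4],
    nsample_list=[16, 32, 128],
    mlp_list=[[32, 32, 64], [64, 64, 128], [64, 96, 128]],
    is_training=is_training_pl, bn_decay=0.95, scope='sa_msg_layer1')
# new_xyz: (8, 512, 3); new_points: (8, 512, 64 + 128 + 128) after concatenating the three scales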
Example #24
0
    def test_hierarical_prediction(self, input_folder=None, save_path=None):
        _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
        logger.info(restore_model_path, bold=True)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        with tf.Session(config=config) as sess:
            tf_util.optimistic_restore(sess, restore_model_path)
            total_time = 0
            samples = glob(input_folder, recursive=True)
            samples.sort()
            # if len(samples)>100:
            #     samples = samples[:100]
            for point_path in samples:
                start = time.time()
                data = pc_util.load(point_path, count=NUM_SHAPE_POINT)
                num_shape_point = data.shape[0]
                data = data[:, 0:3]
                is_2D = np.all(data[:, 2] == 0)
                data, centroid, furthest_distance = pc_util.normalize_point_cloud(
                    data)
                if FLAGS.drop_out < 1:
                    idx = farthest_point_sample(
                        int(num_shape_point * FLAGS.drop_out),
                        data[np.newaxis, ...]).eval()[0]
                    data = data[idx, 0:3]
                if JITTER:
                    data = pc_util.jitter_perturbation_point_cloud(
                        data[np.newaxis, ...],
                        sigma=FLAGS.jitter_sigma,
                        clip=FLAGS.jitter_max,
                        is_2D=is_2D)
                    data = data[0, ...]
                ## get the edge information
                logger.info(os.path.basename(point_path))
                input_list, pred_list = self.pc_prediction(
                    data, sess, patch_num_ratio=PATCH_NUM_RATIO)
                end = time.time()
                total_time += end - start
                print("total time: ", end - start)
                pred_pc = np.concatenate(pred_list, axis=0)
                pred_pc = (pred_pc * furthest_distance) + centroid
                data = (data * furthest_distance) + centroid
                folder = os.path.basename(os.path.dirname(point_path))
                path = os.path.join(save_path, folder,
                                    point_path.split('/')[-1][:-4] + '.ply')
                # pc_util.save_ply(pred_pc, path[:-4]+'_overlapped.ply')
                pc_util.save_ply(data, path[:-4] + '_input.ply')
                idx = farthest_point_sample(
                    int(num_shape_point * FLAGS.drop_out) * UP_RATIO,
                    pred_pc[np.newaxis, ...]).eval()[0]
                pred_pc = pred_pc[idx, 0:3]
                # pred_pc, _, _ = pc_util.normalize_point_cloud(pred_pc)
                # pred_pc = (pred_pc * furthest_distance) + centroid
                pc_util.save_ply(pred_pc, path[:-4] + '.ply')

                # if len(input_list) > 1:
                #     counter = 0
                #     for in_p, pred_p in zip(input_list, pred_list):
                #         pc_util.save_ply(in_p*furthest_distance+centroid, path[:-4]+"_input_patch_%d.ply" % counter)
                #         pc_util.save_ply(pred_p*furthest_distance+centroid, path[:-4]+"_pred_patch_%d.ply" % counter)
                #         counter += 1

            print("average time per shape: ", total_time / len(samples))
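The routine above relies on a normalize / denormalize convention: each shape is centered and scaled to the unit sphere before prediction, and every saved result is mapped back with (pc * furthest_distance) + centroid. Below is a minimal NumPy stand-in for that convention, inferred from how the outputs are used here rather than copied from pc_util.

import numpy as np

def normalize_point_cloud(pc):
    # center on the centroid and scale by the furthest point's distance
    centroid = np.mean(pc, axis=0, keepdims=True)
    pc = pc - centroid
    furthest_distance = np.max(np.sqrt(np.sum(pc ** 2, axis=-1)))
    return pc / furthest_distance, centroid, furthest_distance

pc = np.random.rand(2048, 3).astype(np.float32)
norm_pc, centroid, furthest_distance = normalize_point_cloud(pc)
# ... the network prediction runs on norm_pc ...
restored = norm_pc * furthest_distance + centroid  # same rescaling as the save_ply calls above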
Example #25
0
def main(arg):
    # define placeholder
    training_pl = tf.placeholder(tf.bool, shape=())

    input_sparse_xyz_pl = tf.placeholder(tf.float32, shape=(arg.batch_size, arg.num_point, 3))
    gt_sparse_normal_pl = tf.placeholder(tf.float32, shape=(arg.batch_size, arg.num_point, 3))
    gt_dense_xyz_pl = tf.placeholder(tf.float32, shape=(arg.batch_size, arg.num_point*arg.up_ratio, 3))
    gt_dense_normal_pl = tf.placeholder(tf.float32, shape=(arg.batch_size, arg.num_point*arg.up_ratio, 3))
    input_r_pl = tf.placeholder(tf.float32, shape=(arg.batch_size))

    shape_sparse_xyz_pl = tf.placeholder(tf.float32, shape=[1, arg.num_shape_point, 3])
    shape_ddense_xyz_pl = tf.placeholder(tf.float32, shape=[1, arg.num_point*arg.up_ratio*arg.num_patch, 3])

    # generated point cloud
    gen_dense_xyz, gen_dense_normal, gen_sparse_normal = upsample_model.get_model(input_sparse_xyz_pl, arg.up_ratio, training_pl, knn=30, bradius=input_r_pl, scope='generator')
    
    # fps index
    fps_idx1 = farthest_point_sample(arg.num_patch, shape_sparse_xyz_pl)
    fps_idx2 = farthest_point_sample(arg.num_shape_point*arg.up_ratio, shape_ddense_xyz_pl)

    # loss function
    loss_dense_cd, cd_idx1, cd_idx2 = loss.cd_loss(gen_dense_xyz, gt_dense_xyz_pl, input_r_pl)
    loss_dense_normal = loss.abs_dense_normal_loss(gen_dense_normal, gt_dense_normal_pl, cd_idx1, cd_idx2, input_r_pl)
    loss_sparse_normal = loss.abs_sparse_normal_loss(gen_sparse_normal, gt_sparse_normal_pl, input_r_pl)

    loss_all = 100 * loss_dense_cd + arg.reg_normal1 * loss_dense_normal + arg.reg_normal2 * loss_sparse_normal + tf.losses.get_regularization_loss()
    
    # optimizer
    bn_decay = 0.95
    step = tf.Variable(0,trainable=False)

    gen_update_ops = [op for op in tf.get_collection(tf.GraphKeys.UPDATE_OPS) if op.name.startswith("generator")]
    gen_tvars = [var for var in tf.trainable_variables() if var.name.startswith("generator")]
    param_size = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])

    with tf.control_dependencies(gen_update_ops):
        train_op = tf.train.AdamOptimizer(arg.learning_rate,beta1=0.9).minimize(loss_all,var_list=gen_tvars, global_step=step)
    
    # load data
    dataloader = provider.Fetcher(arg.train_record, batch_size=arg.batch_size, step_ratio=arg.up_ratio, up_ratio=arg.up_ratio, num_in_point=arg.num_point, num_shape_point=arg.num_shape_point, jitter=True, drop_out=1.0, jitter_max=arg.jitter_max, jitter_sigma=arg.jitter_sigma)

    # define ops
    ops = {'training_pl': training_pl,
            'input_sparse_xyz_pl': input_sparse_xyz_pl,
            'gt_sparse_normal_pl': gt_sparse_normal_pl,
            'gt_dense_xyz_pl': gt_dense_xyz_pl,
            'gt_dense_normal_pl': gt_dense_normal_pl,
            'input_r_pl': input_r_pl,
            'shape_sparse_xyz_pl': shape_sparse_xyz_pl,
            'shape_ddense_xyz_pl': shape_ddense_xyz_pl,
            'gen_dense_xyz': gen_dense_xyz,
            'gen_dense_normal': gen_dense_normal,
            'gen_sparse_normal': gen_sparse_normal,
            'fps_idx1': fps_idx1,
            'fps_idx2': fps_idx2,
            'loss_dense_cd': loss_dense_cd,
            'loss_dense_normal': loss_dense_normal,
            'loss_sparse_normal': loss_sparse_normal,
            'loss_all': loss_all,
            'train_op': train_op,
            'step': step
        }

    # create sess
    config = tf.ConfigProto()
    config.allow_soft_placement = True
    config.log_device_placement = False
    with tf.Session(config=config) as sess:
        init = tf.global_variables_initializer()
        sess.run(init, {training_pl: True})
        saver = tf.train.Saver(max_to_keep=10)
        tf.get_default_graph().finalize()

        # if train phase
        if arg.phase == 'train':
            # loop for epoch
            for epoch in tqdm(range(arg.max_epoch+1)):
                train_one_epoch(arg, sess, ops, dataloader)
            # save model
            saver.save(sess, os.path.join(arg.model_path, "model"), global_step=epoch)
            # save xyz files
            eval_shapes(arg, sess, ops, arg.up_ratio, arg.eval_xyz)

        # if eval phase
        if arg.phase == 'test':
            # load model
            saver.restore(sess, arg.pretrained)
            # save xyz files
            eval_shapes(arg, sess, ops, arg.up_ratio, arg.eval_xyz)
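main() expects an argument namespace with the attributes it reads above. Below is a hedged argparse sketch of that namespace; the default values and paths are illustrative assumptions, not the original training configuration.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--phase', default='train', choices=['train', 'test'])
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--num_point', type=int, default=256)
parser.add_argument('--up_ratio', type=int, default=4)
parser.add_argument('--num_shape_point', type=int, default=2048)
parser.add_argument('--num_patch', type=int, default=24)
parser.add_argument('--learning_rate', type=float, default=0.001)
parser.add_argument('--reg_normal1', type=float, default=0.1)
parser.add_argument('--reg_normal2', type=float, default=0.1)
parser.add_argument('--jitter_sigma', type=float, default=0.01)
parser.add_argument('--jitter_max', type=float, default=0.03)
parser.add_argument('--max_epoch', type=int, default=100)
parser.add_argument('--train_record', default='data/train.tfrecord')
parser.add_argument('--model_path', default='log/model')
parser.add_argument('--pretrained', default='log/model/model-100')
parser.add_argument('--eval_xyz', default='data/eval_xyz')

if __name__ == '__main__':
    main(parser.parse_args())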
Example #26
0
def pool(xyz, points, k, npoint):
    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
    _, idx = knn_point(k, xyz, new_xyz)
    new_points = tf.reduce_max(group_point(points, idx), axis=2)

    return new_xyz, new_points
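Hypothetical usage of pool: farthest point sampling picks npoint seed points, and each seed's feature is the max over its k nearest neighbours. The tensor shapes below are assumptions for illustration.

xyz = tf.placeholder(tf.float32, shape=(8, 1024, 3))     # input coordinates
feats = tf.placeholder(tf.float32, shape=(8, 1024, 64))  # per-point features
new_xyz, new_feats = pool(xyz, feats, k=16, npoint=256)
# new_xyz: (8, 256, 3) FPS seeds; new_feats: (8, 256, 64) max-pooled over each seed's 16 neighbours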