Example 1
    def vis():
        import numpy as np
        import trimesh
        tf.compat.v1.enable_eager_execution()
        split = 'train'
        batch_size = 2
        problem = PartnetProblem()
        with problem:
            dataset = problem.get_base_dataset(split).map(pre_batch_map, -1)
            dataset = dataset.batch(batch_size)
            dataset = dataset.map(
                functools.partial(post_batch_map, return_coords=True))

        for features, labels, weights in dataset:
            del labels, weights

            def vis_clouds(in_coords, out_coords, target_index, neighbors):
                out_colors = np.ones((out_coords.shape[0], 3), dtype=np.uint8)
                out_colors *= 128
                out_colors[target_index] = [0, 255, 0]
                out_cloud = trimesh.PointCloud(out_coords, out_colors)
                in_colors = np.zeros((in_coords.shape[0], 3), dtype=np.uint8)

                in_colors[:, 2] = 255
                neigh_indices = neighbors[neighbors[:, 0] == target_index, 1]
                in_colors[neigh_indices] = [255, 0, 0]
                in_cloud = trimesh.PointCloud(in_coords, in_colors)

                scene = in_cloud.scene()
                scene.add_geometry(out_cloud)
                scene.show(background=(0, 0, 0))

            features = tf.nest.map_structure(lambda x: x.numpy(), features)
            all_coords = features['all_coords']
            # sample_indices = features['sample_indices']
            in_place_indices = features['in_place_indices']
            in_place_rel_coords = features['in_place_rel_coords']
            down_sample_indices = features['down_sample_indices']
            down_sample_rel_coords = features['down_sample_rel_coords']
            row_splits = features['row_splits']
            depth = len(all_coords)

            for i in range(depth - 1):
                # down sample
                # max per-edge offset magnitude (norm over the coordinate axis)
                print(np.max(np.linalg.norm(down_sample_rel_coords[i],
                                            axis=-1)))
                print(np.max(np.linalg.norm(in_place_rel_coords[i], axis=-1)))
                vis_clouds(all_coords[i], all_coords[i + 1],
                           row_splits[i + 1][-2], down_sample_indices[i])
                # in place
                vis_clouds(all_coords[i + 1], all_coords[i + 1],
                           row_splits[i + 1][-2], in_place_indices[i])
Example 2
    def run_explicit():
        tf.compat.v1.enable_eager_execution()
        tf.config.experimental_run_functions_eagerly(True)
        split = 'train'
        batch_size = 16
        problem = PartnetProblem()
        with problem:
            dataset = problem.get_base_dataset(split).map(pre_batch_map, -1)
            dataset = dataset.batch(batch_size)

        for ex in dataset:
            post_batch_map(*ex)
            break
        print('Finished single run of explicit post_batch_map')
Example 3
    def run_benchmark():
        split = 'train'
        batch_size = 16
        num_warmup = 5
        num_batches = 10
        problem = PartnetProblem()
        with problem:
            dataset = problem.get_base_dataset(split).map(pre_batch_map, -1)
            dataset = dataset.batch(batch_size)
            dataset = dataset.map(post_batch_map, -1).prefetch(-1)

        for i, _ in enumerate(
                tqdm.tqdm(dataset.take(num_warmup + num_batches),
                          total=num_warmup + num_batches,
                          desc='benchmarking')):
            if i == num_warmup:
                t = time()
            if i == num_warmup + num_batches - 1:
                dt = time() - t

        print('{} batches in {} s: {} ms / batch'.format(
            num_batches, dt, dt * 1000 / num_batches))
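
The -1 passed to map and prefetch here is the value of tf.data.experimental.AUTOTUNE, which lets tf.data pick the degree of parallelism. A minimal self-contained equivalent in explicit form (with a dummy dataset):

import tensorflow as tf

AUTOTUNE = tf.data.experimental.AUTOTUNE  # == -1, the value used above

dataset = tf.data.Dataset.range(10)
dataset = dataset.map(lambda x: x * 2, num_parallel_calls=AUTOTUNE)
dataset = dataset.prefetch(AUTOTUNE)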
Example 4
@gin.configurable(blacklist=['input_spec', 'output_spec'])
def res_conv_semantic_segmenter(input_spec,
                                output_spec,
                                dense_factory=mk_layers.Dense,
                                batch_norm_impl=mk_layers.BatchNormalization,
                                activation='relu',
                                filters0=32):
    pass


if __name__ == '__main__':
    import tensorflow_datasets as tfds
    from deep_cloud.problems.partnet import PartnetProblem
    tf.compat.v1.enable_v2_tensorshape()
    problem = PartnetProblem()
    dataset = problem.get_base_dataset(split='validation')

    def test_eager():
        for positions, _ in tfds.as_numpy(dataset):
            compute_edges_eager(positions)
            break
        print('test_eager passed')

    def test_pre_batch_map_fn():
        ds = dataset.map(pre_batch_map)
        for _ in tfds.as_numpy(ds):
            break
        print('test_pre_batch_map_fn passed')

    def test_pipeline():
        pass
Example 5
    def run_full_benchmark():
        from deep_cloud.ops.np_utils.tree_utils import pykd
        from deep_cloud.ops.np_utils.tree_utils import core

        from deep_cloud.problems.partnet import PartnetProblem
        from tqdm import tqdm
        from time import time

        depth = 6
        num_warmup = 5
        num_iterations = 10
        k0 = 16
        tree_impl = pykd.KDTree
        SQRT_2 = np.sqrt(2.)
        context = PyFuncContext()
        problem = PartnetProblem()

        def f(coords, labels):
            tree = context.py_function(tree_impl, None, coords)
            dists, indices = context.py_function(
                lambda tree: tree.query(tree.data, 2, return_distance=True),
                (tf.TensorSpec(shape=(None, 2), dtype=tf.float32),
                 tf.TensorSpec(shape=(None, 2), dtype=tf.int64)), tree)

            scale = tf.reduce_mean(dists[:, 1])
            coords = coords * (2. / scale)

            all_coords = [coords]
            trees = [tree]

            radii = 4 * np.power(2, np.arange(depth))

            flat_indices = []
            row_splits = []
            rel_coords = []
            sample_indices = []

            def add_conv(tree, coords, radius, k0):
                def fn(tree, coords):
                    indices = tree.query_ball_point(coords,
                                                    radius,
                                                    approx_neighbors=k0)
                    rc = np.repeat(coords, indices.row_lengths,
                                   axis=0) - coords[indices.flat_values]
                    rc /= radius
                    return indices.flat_values, indices.row_splits, rc

                fi, rs, rc = context.py_function(
                    fn, (tf.TensorSpec(shape=(None, ), dtype=tf.int64),
                         tf.TensorSpec(shape=(None, ), dtype=tf.int64),
                         tf.TensorSpec(shape=(None, 3), dtype=tf.float32)),
                    (tree, coords))
                flat_indices.append(fi)
                row_splits.append(rs)
                rel_coords.append(rc)

                # n = tree.n
                # m = coords.shape[0]
                # e = indices.row_splits[-1]
                # lines.append(str((e, n, m, e / n, e / m, radius)))
                return fi, rs

            # initial query in order to do initial rejection sample
            # indices = tree.query_ball_point(coords, radii[0], approx_neighbors=k0)
            indices = context.py_function(
                lambda tree, coords: np.array(
                    core.rejection_sample_active(tree, coords.numpy(),
                                                 radii[0], k0)),
                tf.TensorSpec(shape=(None, ), dtype=tf.int64), tree, coords)
            # indices = np.array(core.rejection_sample_lazy(tree, coords, radii[0], k0))
            sample_indices.append(indices)
            out_coords = tf.gather(coords, indices)
            all_coords.append(out_coords)
            tree = context.py_function(tree_impl, None, out_coords)
            trees.append(tree)
            # initial large down-sample conv
            add_conv(tree, coords, radii[0] * 2, k0 * 4)
            coords = out_coords

            def rejection_sample(flat_indices, row_splits):
                from more_keras.ragged.np_impl import RaggedArray
                ra = RaggedArray.from_row_splits(flat_indices, row_splits)
                return np.array(core.rejection_sample_precomputed(ra),
                                dtype=np.int64)

            for i in range(1, depth - 1):
                # in place
                indices_comp = add_conv(tree, coords, radii[i], k0)
                indices = context.py_function(
                    rejection_sample,
                    tf.TensorSpec(shape=(None, ), dtype=tf.int64),
                    *indices_comp)
                sample_indices.append(indices)
                out_coords = tf.gather(coords, indices)
                all_coords.append(out_coords)
                tree = context.py_function(tree_impl, None, out_coords)
                trees.append(tree)

                # down sample
                # larger radius means number of edges remains roughly constant
                # number of ops remains constant if number of filters doubles
                # also makes more neighbors for upsampling (~4 on average vs ~2)
                add_conv(tree, coords, radii[i] * SQRT_2, k0)
                coords = out_coords

            # final in_place
            add_conv(tree, coords, radii[-1], k0)
            # lines.append('***')
            # print('\n'.join(lines))  # DEBUG
            return (
                tuple(flat_indices),
                tuple(rel_coords),
                tuple(row_splits),
                tuple(all_coords),
                tuple(sample_indices),
            )

        # out = dists, indices

        dataset = problem.get_base_dataset('validation')
        dataset = dataset.map(f, -1)

        out = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
        with tf.Session() as sess:
            for _ in tqdm(range(num_warmup), desc='warming up'):
                sess.run(out)

            t = time()
            for _ in tqdm(range(num_iterations), desc='profiling'):
                sess.run(out)
            dt = time() - t
            print('Ran {} iterations in {} s: {} ms / iteration'.format(
                num_iterations, dt, dt * 1000 / num_iterations))
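
The core.rejection_sample_precomputed implementation is not shown in this snippet; as an assumption about its behavior (not the library's actual code), greedy rejection sampling over precomputed neighborhoods can be sketched in pure numpy:

import numpy as np

def rejection_sample_sketch(flat_indices, row_splits):
    # Visit points in order; keep a point only if no previously kept
    # point covers it, then mark all of its neighbors
    # (flat_indices[row_splits[i]:row_splits[i + 1]]) as covered.
    n = len(row_splits) - 1
    covered = np.zeros(n, dtype=bool)
    kept = []
    for i in range(n):
        if not covered[i]:
            kept.append(i)
            covered[flat_indices[row_splits[i]:row_splits[i + 1]]] = True
    return np.array(kept, dtype=np.int64)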
Example 6
        # print(sess.run(tf.reduce_max(tf.abs(conv0 - conv2))))


def grid_conv_benchmark():
    grid_features = tf.Variable(np.random.normal(size=(8, 64, 64, 64)),
                                dtype=tf.float32)
    grid_conv = tf.keras.layers.Conv2D(64, 3)(grid_features)
    grid_grad, = tf.gradients(grid_conv, grid_features)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        bm = tf.test.Benchmark()
        bm.run_op_benchmark(sess, (grid_conv, grid_grad))


for coords, labels in tfds.as_numpy(
        PartnetProblem().get_base_dataset(split='validation').take(1)):
    break

coords = coords[:4096]

tree = pykd.KDTree(coords)
dists, indices = tree.query(tree.data, 2, return_distance=True)
scale = np.mean(dists[:, 1])
assert scale > 0
coords *= (2 / scale)

print('in_place')
in_place_conv_benchmark(coords)
print('subsample')
subsample_conv_benchmark(coords)
print('grid')
grid_conv_benchmark()
Example 7
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from time import time
from tqdm import tqdm
import tensorflow_datasets as tfds

from deep_cloud.problems.partnet import PartnetProblem
from deep_cloud.ops.np_utils.tree_utils import pykd

problem = PartnetProblem()
warm_up = 5
benchmark = 10
total = warm_up + benchmark

tree_impl = pykd.KDTree
all_coords = []
for coords, _ in tqdm(tfds.as_numpy(
        problem.get_base_dataset('validation').take(total)),
                      total=total,
                      desc='getting base data...'):
    tree = tree_impl(coords)
    dists, indices = tree.query(tree.data, 2, return_distance=True)
    del indices
    scale = np.mean(dists[:, 1])
    coords *= 2 / scale
    all_coords.append(coords)
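
By construction, the rescaling above makes the mean nearest-neighbor distance equal to 2 (up to floating point). A quick optional check on the last cloud, reusing the same query as above:

tree = tree_impl(all_coords[-1])
dists, _ = tree.query(tree.data, 2, return_distance=True)
# dists[:, 1] is the distance to the nearest distinct neighbor.
print('mean NN distance after rescale:', np.mean(dists[:, 1]))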

Example 8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds

from more_keras.ops import utils as op_utils

from deep_cloud.problems.partnet import PartnetProblem
from deep_cloud.ops.np_utils.tree_utils import pykd

for coords, labels in tfds.as_numpy(
        PartnetProblem().get_base_dataset().take(1)):
    break

tree = pykd.KDTree(coords)
dists, indices = tree.query(tree.data, 2, return_distance=True)
scale = np.mean(dists[:, 1])
assert scale > 0
coords *= (2 / scale)

tree = pykd.KDTree(coords)
indices = tree.query_ball_point(coords, 4, approx_neighbors=16)

c0 = coords.shape[0]
# nf = 64
nf = 16
nd = 3
features = np.random.uniform(size=(c0, nf, nd))
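
Given the ragged indices above, per-edge relative coordinates can be flattened CSR-style, mirroring the rel-coords computation in the earlier snippets (assuming indices exposes row_lengths and flat_values, as used there):

# One (query point - neighbor) offset per edge, flattened.
rel_coords = np.repeat(coords, indices.row_lengths,
                       axis=0) - coords[indices.flat_values]
print(rel_coords.shape)  # (num_edges, 3)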
Example 9
if __name__ == '__main__':
    from time import time
    import functools
    from deep_cloud.problems.partnet import PartnetProblem
    from deep_cloud.models.sparse import preprocess as pp
    import tqdm
    # tf.compat.v1.enable_eager_execution()
    # tf.config.experimental_run_functions_eagerly(True)
    split = 'train'
    batch_size = 16
    # batch_size = 2
    num_classes = 4
    num_warmup = 5
    num_batches = 10
    problem = PartnetProblem()
    with problem:
        dataset = problem.get_base_dataset(split).map(pp.pre_batch_map, -1)
        dataset = dataset.batch(batch_size)
        dataset = dataset.map(pp.post_batch_map, -1).prefetch(-1)

    # for args in dataset.take(1):
    #     if len(args) == 3:
    #         inputs, labels, weights = args
    #     else:
    #         inputs, labels = args
    #     logits = semantic_segmenter_logits(inputs,
    #                                        problem.output_spec.shape[-1])
    # print('Success!')
    # exit()
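
    # Sketch: a warm-up/timing loop in the style of run_benchmark above,
    # using the num_warmup / num_batches parameters already set up here.
    for i, _ in enumerate(
            tqdm.tqdm(dataset.take(num_warmup + num_batches),
                      total=num_warmup + num_batches,
                      desc='benchmarking')):
        if i == num_warmup:
            t = time()
        if i == num_warmup + num_batches - 1:
            dt = time() - t

    print('{} batches in {} s: {} ms / batch'.format(
        num_batches, dt, dt * 1000 / num_batches))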