Example #1
def create_archive(cat_desc, shape=(192, 256), n_images=8, example_ids=None):
    import os
    from shapenet.core import cat_desc_to_id
    from shapenet.core import get_example_ids
    from shapenet.core.blender_renderings.config import RenderConfig
    from progress.bar import IncrementalBar
    import zipfile
    cat_id = cat_desc_to_id(cat_desc)
    if example_ids is None or len(example_ids) == 0:
        example_ids = get_example_ids(cat_id)
    config = RenderConfig(shape=shape, n_images=n_images)
    zip_path = config.get_zip_path(cat_id)
    with zipfile.ZipFile(zip_path, mode='a', allowZip64=True) as zf:
        bar = IncrementalBar(max=len(example_ids))
        for example_id in example_ids:
            example_dir = config.get_example_dir(cat_id, example_id)
            if not os.path.isdir(example_dir):
                print('No directory at %s' % example_dir)
            else:
                for fn in os.listdir(example_dir):
                    src = os.path.join(example_dir, fn)
                    dst = os.path.join(cat_id, example_id, fn)
                    zf.write(src, dst)
            bar.next()
        bar.finish()
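
A minimal invocation sketch (the category is a placeholder; the renderings themselves are assumed to have been produced beforehand):

# append every rendered view of every 'chair' example to the category zip archive
create_archive('chair', shape=(192, 256), n_images=8)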
Example #2
def get_data(model_id, example_ids=None):
    edge_length_threshold = 0.02
    builder = get_builder(model_id)
    cat_id = builder.cat_id

    with get_ffd_dataset(
            cat_id, edge_length_threshold=edge_length_threshold) as ffd_ds:
        template_ids, bs, ps = zip(*builder.get_ffd_data(ffd_ds))

    with get_template_mesh_dataset(cat_id, edge_length_threshold) as mesh_ds:
        faces = [np.array(mesh_ds[e]['faces']) for e in template_ids]

    predictions_ds = get_predictions_dataset(model_id)
    mesh_ds = get_mesh_dataset(cat_id)
    image_ds = RenderConfig().get_dataset(cat_id, builder.view_index)
    zipped = Dataset.zip(predictions_ds, mesh_ds, image_ds)
    with zipped:
        if example_ids is None:
            example_ids = list(predictions_ds.keys())
            random.shuffle(example_ids)
        for example_id in example_ids:
            print(example_id)
            pred, mesh, image = zipped[example_id]
            i = np.argmax(pred['probs'])
            dp = np.array(pred['dp'][i])
            b = bs[i]
            p = ps[i]
            yield example_id, b, p, dp, faces[i], mesh, image
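
A consumption sketch for the generator above; 'e_chair' assumes the '<regime>_<category>' model-id convention used in the other examples, and the comment on b, p and dp is an interpretation rather than something stated in this snippet:

import numpy as np

for example_id, b, p, dp, faces, mesh, image in get_data('e_chair'):
    # b, p and dp are presumably the FFD basis, template control points and
    # predicted offsets for the highest-probability template
    print(example_id, np.shape(b), np.shape(p), np.shape(dp))
    break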
Example #3
def vis(cat, n_images, view_index=5, example_ids=None):
    import matplotlib.pyplot as plt
    from shapenet.core import cat_desc_to_id, get_example_ids
    from shapenet.core.blender_renderings.config import RenderConfig
    cat_id = cat_desc_to_id(cat)
    config = RenderConfig(n_images=n_images)
    dataset = config.get_dataset(cat_id, view_index)
    if example_ids is not None and len(example_ids) > 0:
        dataset = dataset.subset(example_ids)
    else:
        example_ids = get_example_ids(cat_id)
    with dataset:
        for example_id in example_ids:
            plt.imshow(dataset[example_id])
            plt.title(example_id)
            plt.show()
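
A minimal invocation sketch; the category and image count are placeholders:

# display view 5 of each rendered 'chair' example, one figure at a time
vis('chair', n_images=8, view_index=5)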
Example #4
def get_ds(cat_desc, regime='e'):
    view_index = 5
    edge_length_threshold = 0.02
    # n_samples was undefined in the original snippet; 1024 matches the
    # default used in Example #7
    n_samples = 1024

    cat_id = cat_desc_to_id(cat_desc)
    model_id = '%s_%s' % (regime, cat_desc)

    image_ds = RenderConfig().get_dataset(cat_id, view_index)
    cloud_ds = get_cloud_manager(
        model_id, pre_sampled=True, n_samples=n_samples).get_lazy_dataset()
    mesh_ds = get_inferred_mesh_dataset(
        model_id, edge_length_threshold=edge_length_threshold)
    gt_mesh_ds = get_mesh_dataset(cat_id)
    voxel_ds = get_voxel_dataset(
        model_id, edge_length_threshold=edge_length_threshold, filled=False)
    selected_template_ds = get_selected_template_idx_dataset(model_id)

    template_meshes = []
    with gt_mesh_ds:
        for template_id in get_template_ids(cat_id):
            mesh = gt_mesh_ds[template_id]
            template_meshes.append(
                {k: np.array(mesh[k]) for k in ('vertices', 'faces')})

    template_mesh_ds = selected_template_ds.map(lambda i: template_meshes[i])

    return Dataset.zip(
        image_ds, gt_mesh_ds, cloud_ds, mesh_ds, voxel_ds, template_mesh_ds)
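
A usage sketch, assuming the zipped dids Dataset is opened and iterated over its keys in the same way as in the other examples:

ds = get_ds('chair', regime='e')
with ds:
    for example_id in ds:
        image, gt_mesh, cloud, mesh, voxels, template_mesh = ds[example_id]
        break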
Example #5
def get_image_dataset(cat_ids, example_ids, view_indices, render_config=None):
    from shapenet.image import with_background
    from dids.core import BiKeyDataset
    if render_config is None:
        from shapenet.core.blender_renderings.config import RenderConfig
        render_config = RenderConfig()
    if isinstance(cat_ids, str):
        cat_ids = [cat_ids]
        example_ids = [example_ids]
    if isinstance(view_indices, int):
        view_indices = [view_indices]
    datasets = {
        c: render_config.get_multi_view_dataset(
            c, view_indices=view_indices, example_ids=eid)
        for c, eid in zip(cat_ids, example_ids)}
    dataset = BiKeyDataset(datasets).map(
        lambda image: with_background(image, 255))
    dataset = dataset.map_keys(
        lambda key: (key[0], (key[1], key[2])),
        lambda key: (key[0],) + key[1])
    return dataset
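
A usage sketch; reading the first map_keys lambda, the returned dataset appears to be indexed by flat (cat_id, example_id, view_index) triples, though this is an assumption about dids' map_keys semantics. The ids below are placeholders:

image_ds = get_image_dataset(cat_id, example_ids, view_indices=[0, 5])
with image_ds:
    # white-backgrounded rendering of the first example, seen from view 5
    image = image_ds[(cat_id, example_ids[0], 5)]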
Example #6
def check_zip(cat_desc, shape, n_images):
    import zipfile
    from shapenet.core.blender_renderings.config import RenderConfig
    from shapenet.core import cat_desc_to_id, get_example_ids
    cat_id = cat_desc_to_id(cat_desc)

    config = RenderConfig(shape=shape, n_images=n_images)
    rendered_ids = set()
    with zipfile.ZipFile(config.get_zip_path(cat_id)) as zf:
        for name in zf.namelist():
            rendered_ids.add(name.split('/')[1])

    not_rendered_count = 0
    example_ids = get_example_ids(cat_id)
    for example_id in example_ids:
        if example_id not in rendered_ids:
            print(example_id)
            not_rendered_count += 1

    if not_rendered_count > 0:
        print('%d / %d not rendered' % (not_rendered_count, len(example_ids)))
    else:
        print('All %d %ss rendered!' % (len(example_ids), cat_desc))
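
A minimal invocation sketch, matching the render settings used in Example #1:

# report any 'chair' examples missing from the renderings archive
check_zip('chair', shape=(192, 256), n_images=8)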
Example #7
def vis_clouds(model_id,
               pre_sampled=True,
               n_samples=1024,
               edge_length_threshold=0.1,
               shuffle=False):
    import random
    import numpy as np
    from mayavi import mlab
    import matplotlib.pyplot as plt
    from dids import Dataset
    from shapenet.core.blender_renderings.config import RenderConfig
    from shapenet.core.meshes import get_mesh_dataset
    from util3d.mayavi_vis import vis_point_cloud
    from util3d.mayavi_vis import vis_mesh
    from template_ffd.data.ids import get_example_ids
    from template_ffd.inference.clouds import get_inferred_cloud_dataset
    from template_ffd.model import get_builder
    builder = get_builder(model_id)
    cat_id = builder.cat_id
    kwargs = dict(model_id=model_id, n_samples=n_samples)
    if not pre_sampled:
        kwargs['edge_length_threshold'] = edge_length_threshold
    cloud_dataset = get_inferred_cloud_dataset(pre_sampled=pre_sampled,
                                               **kwargs)
    image_dataset = RenderConfig().get_dataset(cat_id, builder.view_index)

    example_ids = get_example_ids(cat_id, 'eval')
    if shuffle:
        example_ids = list(example_ids)
        random.shuffle(example_ids)
    mesh_dataset = get_mesh_dataset(cat_id)
    zipped_dataset = Dataset.zip(image_dataset, cloud_dataset, mesh_dataset)
    # zipped_dataset = Dataset.zip(image_dataset, cloud_dataset)
    with zipped_dataset:
        for example_id in example_ids:
            image, cloud, mesh = zipped_dataset[example_id]
            # image, cloud = zipped_dataset[example_id]
            plt.imshow(image)
            vis_point_cloud(np.array(cloud),
                            color=(0, 1, 0),
                            scale_factor=0.01)
            v, f = (np.array(mesh[k]) for k in ('vertices', 'faces'))
            vis_mesh(v,
                     f,
                     color=(0, 0, 1),
                     opacity=0.1,
                     include_wireframe=False)
            plt.show(block=False)
            mlab.show()
            plt.close()
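
A minimal invocation sketch; 'e_chair' assumes the '<regime>_<category>' model-id convention used elsewhere in these examples:

# overlay each inferred cloud (green) on its ground-truth mesh (blue), in shuffled order
vis_clouds('e_chair', pre_sampled=True, n_samples=1024, shuffle=True)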
Example #8
    def get_inputs(self, mode, repeat=None):
        from shapenet.core.blender_renderings.config import RenderConfig
        from ..data.ids import get_example_ids
        render_config = RenderConfig()
        view_index = self.params.get(
            'view_index', range(render_config.n_images))
        cat_ids = self.cat_ids
        example_ids = tuple(
            get_example_ids(cat_id, mode) for cat_id in cat_ids)
        if repeat is None:
            repeat = mode == tf.estimator.ModeKeys.TRAIN
        dataset = get_tf_dataset(
            render_config, view_index, cat_ids, example_ids,
            batch_size=self.batch_size, shuffle=True, repeat=repeat)
        return dataset.make_one_shot_iterator().get_next()
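
A minimal usage sketch, assuming model is an instance of the (unnamed) class defining get_inputs above; the only behaviour taken from the snippet is that repeat defaults to True in TRAIN mode:

import tensorflow as tf

train_inputs = model.get_inputs(tf.estimator.ModeKeys.TRAIN)  # dataset repeats
eval_inputs = model.get_inputs(tf.estimator.ModeKeys.EVAL)    # single pass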
Example #9
    def render_config(self):
        from shapenet.core.blender_renderings.config import RenderConfig
        return RenderConfig(**self.params.get('render_params', {}))
Example #10
        file_index = get_file_index(zip_file)
        for example_id in example_ids:
            bar.next()
            render_example(
                config, cat_id, example_id, zip_file,
                overwrite, call_kwargs, blender_path=blender_path,
                verbose=verbose, file_index=file_index)
    bar.finish()


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('cat', type=str)
    parser.add_argument('--shape', type=int, nargs=2, default=[192, 256])
    parser.add_argument('--scale', type=float, default=None)
    parser.add_argument('--blender_path', type=str, default='blender')
    parser.add_argument('-n', '--n_images', type=int, default=8)
    parser.add_argument('-d', '--debug', action='store_true')
    parser.add_argument('-r', '--reverse', action='store_true')
    parser.add_argument('-o', '--overwrite', action='store_true')
    parser.add_argument('-i', '--example_ids', nargs='*')
    parser.add_argument('-f', '--fixed_meshes', action='store_true')
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()
    config = RenderConfig(args.shape, args.n_images, args.scale)
    cat_id = cat_desc_to_id(args.cat)
    render_cat(config, cat_id, args.overwrite, args.reverse, args.debug,
               args.example_ids, args.fixed_meshes, args.blender_path,
               args.verbose)
Example #11
def vis_segmentations(model_id,
                      example_ids=None,
                      vis_mesh=False,
                      edge_length_threshold=0.02,
                      include_wireframe=False,
                      save=False):
    from scipy.misc import imsave
    if save and example_ids is None:
        raise ValueError('Cannot save without specifying example_ids')
    builder = get_builder(model_id)
    cat_id = builder.cat_id
    if example_ids is None:
        example_ids = get_example_ids(cat_id, 'eval')
    if vis_mesh:
        segmented_fn = builder.get_segmented_mesh_fn(edge_length_threshold)
    else:
        segmented_fn = builder.get_segmented_cloud_fn()
    config = RenderConfig()

    with get_predictions_dataset(model_id) as predictions:
        with config.get_dataset(cat_id, builder.view_index) as image_ds:
            for example_id in example_ids:
                example = predictions[example_id]
                probs, dp = (np.array(example[k]) for k in ('probs', 'dp'))
                result = segmented_fn(probs, dp)
                if result is not None:
                    image = image_ds[example_id]
                    print(example_id)
                    segmentation = result['segmentation']
                    if vis_mesh:
                        vertices = result['vertices']
                        faces = result['faces']
                        original_points = result['original_points']
                        original_seg = result['original_segmentation']
                        f0 = mlab.figure(bgcolor=(1, 1, 1))
                        vis_segmented_mesh(vertices,
                                           segmented_cloud(
                                               faces, segmentation),
                                           include_wireframe=include_wireframe,
                                           opacity=0.2)
                        f1 = mlab.figure(bgcolor=(1, 1, 1))
                        vis_clouds(
                            segmented_cloud(original_points, original_seg))
                    else:
                        points = result['points']
                        original_points = result['original_points']
                        f0 = mlab.figure(bgcolor=(1, 1, 1))
                        vis_clouds(segmented_cloud(points, segmentation))
                        f1 = mlab.figure(bgcolor=(1, 1, 1))
                        vis_clouds(
                            segmented_cloud(original_points, segmentation))

                    if save:
                        folder = os.path.join(_paper_dir, 'segmentations',
                                              model_id, example_id)
                        if not os.path.isdir(folder):
                            os.makedirs(folder)
                        fn = 'inferred_%s.png' % ('mesh'
                                                  if vis_mesh else 'cloud')
                        p0 = os.path.join(folder, fn)
                        mlab.savefig(p0, figure=f0)
                        p1 = os.path.join(folder, 'annotated_cloud.png')
                        mlab.savefig(p1, figure=f1)
                        pi = os.path.join(folder, 'query_image.png')
                        imsave(pi, image)
                        mlab.close()
                    else:
                        plt.imshow(image)
                        plt.show(block=False)
                        mlab.show()
                        plt.close()
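
A minimal call sketch; 'e_chair' assumes the '<regime>_<category>' model-id convention, and saving requires explicit example ids:

# interactively inspect segmented meshes for the eval split
vis_segmentations('e_chair', vis_mesh=True)
# vis_segmentations('e_chair', example_ids=['<some_example_id>'], save=True)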
Example #12
import numpy as np
from dids import Dataset
from shapenet.core import cat_desc_to_id
from shapenet.core.blender_renderings.config import RenderConfig
from shapenet.core.meshes import get_mesh_dataset
from template_ffd.data.ids import get_example_ids
from template_ffd.model import get_builder
# the import for get_predictions_dataset is not shown in the original excerpt

regime = 'e'
cat_desc = 'chair'
view_index = 5
edge_length_threshold = 0.02

shuffle = True
k = 3

cat_id = cat_desc_to_id(cat_desc)
model_id = '%s_%s' % (regime, cat_desc)
builder = get_builder(model_id)

image_ds = RenderConfig().get_dataset(cat_id, view_index)
gt_mesh_ds = get_mesh_dataset(cat_id)
predictions_ds = get_predictions_dataset(model_id)

top_k_mesh_fn = builder.get_prediction_to_top_k_mesh_fn(
    edge_length_threshold, k)

all_ds = Dataset.zip(image_ds, gt_mesh_ds, predictions_ds)


def vis():
    def vis_mesh(mesh, include_wireframe=False, **kwargs):
        from util3d.mayavi_vis import vis_mesh as vm
        v, f = (np.array(mesh[k]) for k in ('vertices', 'faces'))
        vm(v, f, include_wireframe=include_wireframe, **kwargs)
Example #13
import os
import matplotlib.pyplot as plt
from shapenet.image import with_background
from shapenet.core.blender_renderings.config import RenderConfig
from shapenet.core import cat_desc_to_id, get_example_ids


cat_desc = 'plane'
view_index = 5
config = RenderConfig()
view_angle = config.view_angle(view_index)
cat_id = cat_desc_to_id(cat_desc)
example_ids = get_example_ids(cat_id)

path = config.get_zip_path(cat_id)
if not os.path.isfile(path):
    raise IOError('No renderings at %s' % path)

with config.get_dataset(cat_id, view_index) as ds:
    ds = ds.map(lambda image: with_background(image, 255))
    for example_id in ds:
        image = ds[example_id]
        plt.imshow(image)
        plt.show()
Example #14
def vis_segmentations(
        model_id, example_ids=None, vis_mesh=False,
        edge_length_threshold=0.02, include_wireframe=False,
        save=False):
    from scipy.misc import imsave
    if save and example_ids is None:
        raise ValueError('Cannot save without specifying example_ids')
    builder = get_builder(model_id)
    cat_id = builder.cat_id
    if example_ids is None:
        example_ids = get_example_ids(cat_id, 'eval')
    if vis_mesh:
        segmented_fn = builder.get_segmented_mesh_fn(edge_length_threshold)
    else:
        segmented_fn = builder.get_segmented_cloud_fn()
    config = RenderConfig()

    with get_predictions_dataset(model_id) as predictions:
        with config.get_dataset(cat_id, builder.view_index) as image_ds:
            for example_id in example_ids:
                example = predictions[example_id]
                probs, dp = (np.array(example[k]) for k in ('probs', 'dp'))
                result = segmented_fn(probs, dp)
                if result is not None:
                    image = image_ds[example_id]
                    print(example_id)
                    segmentation = result['segmentation']
                    if vis_mesh:
                        vertices = result['vertices']
                        faces = result['faces']
                        original_points = result['original_points']
                        original_seg = result['original_segmentation']
                        f0 = mlab.figure(bgcolor=(1, 1, 1))
                        vis_segmented_mesh(
                            vertices, segmented_cloud(faces, segmentation),
                            include_wireframe=include_wireframe,
                            opacity=0.2)
                        f1 = mlab.figure(bgcolor=(1, 1, 1))
                        vis_clouds(
                            segmented_cloud(original_points, original_seg))
                    else:
                        points = result['points']
                        original_points = result['original_points']
                        f0 = mlab.figure(bgcolor=(1, 1, 1))
                        vis_clouds(segmented_cloud(points, segmentation))
                        f1 = mlab.figure(bgcolor=(1, 1, 1))
                        vis_clouds(
                            segmented_cloud(original_points, segmentation))

                    if save:
                        folder = os.path.join(
                            _paper_dir, 'segmentations', model_id, example_id)
                        if not os.path.isdir(folder):
                            os.makedirs(folder)
                        fn = 'inferred_%s.png' % (
                            'mesh' if vis_mesh else 'cloud')
                        p0 = os.path.join(folder, fn)
                        mlab.savefig(p0, figure=f0)
                        p1 = os.path.join(folder, 'annotated_cloud.png')
                        mlab.savefig(p1, figure=f1)
                        pi = os.path.join(folder, 'query_image.png')
                        imsave(pi, image)
                        mlab.close()
                    else:
                        plt.imshow(image)
                        plt.show(block=False)
                        mlab.show()
                        plt.close()
Example #15
    @property
    def output_shape(self):
        return self._config.shape + (3, )

    @property
    def output_type(self):
        return tf.uint8


def get_renderings_dataset(render_config, view_index, cat_id, example_ids):
    manager = RenderingsManager(render_config, view_index, cat_id)
    return base_dataset(example_ids).map(manager.map_tf)


if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from shapenet.core import cat_desc_to_id, get_example_ids
    from shapenet.core.blender_renderings.config import RenderConfig
    cat_desc = 'plane'
    view_index = 5
    config = RenderConfig()
    cat_id = cat_desc_to_id(cat_desc)
    example_ids = get_example_ids(cat_id)
    dataset = get_renderings_dataset(config, view_index, cat_id, example_ids)
    image_tf = dataset.make_one_shot_iterator().get_next()
    with tf.train.MonitoredSession() as sess:
        while not sess.should_stop():
            image = sess.run(image_tf)
            plt.imshow(image)
            plt.show()