Example #1
def rotate(angle, axis):
    # camera_xyz, object_xyz and up come from the enclosing scope
    # (see the full function in Example #2).
    rot = transforms.rotation_matrix(angle, axis, deg=True)
    new_cam_xyz = transforms.apply44(rot, camera_xyz[None, :]).ravel()
    new_up = transforms.apply44(rot, up[None, :]).ravel()
    return transforms.lookat_matrix(cam_xyz=new_cam_xyz,
                                    obj_xyz=object_xyz,
                                    up=new_up)
Example #2
def make_six_views(camera_xyz, object_xyz, up):
    """Returns lookat matrices for the six canonical views (front, back,
    left, right, top, bottom) of a camera orbiting `object_xyz`."""
    camera_xyz = np.array(camera_xyz).ravel().astype(np.float64)
    object_xyz = np.array(object_xyz).ravel().astype(np.float64)
    up = np.array(up).ravel().astype(np.float64)

    viewing_dir = object_xyz - camera_xyz
    viewing_dir /= la.norm(viewing_dir)
    # Rotation axis for the top and bottom views.
    left = np.cross(up, viewing_dir)

    # front
    Rt_list = [
        transforms.lookat_matrix(cam_xyz=camera_xyz, obj_xyz=object_xyz, up=up)
    ]

    def rotate(angle, axis):
        rot = transforms.rotation_matrix(angle, axis, deg=True)
        new_cam_xyz = transforms.apply44(rot, camera_xyz[None, :]).ravel()
        new_up = transforms.apply44(rot, up[None, :]).ravel()
        return transforms.lookat_matrix(cam_xyz=new_cam_xyz,
                                        obj_xyz=object_xyz,
                                        up=new_up)

    Rt_list.append(rotate(180, up))  # back
    Rt_list.append(rotate(-90, up))  # left
    Rt_list.append(rotate(90, up))  # right
    Rt_list.append(rotate(90, left))  # top

    # bottom: rotate to the top view, then flip 180 degrees about the
    # original viewing direction. The up vector only needs the first rotation.
    rot = transforms.rotation_matrix(90, left, deg=True)
    rot2 = transforms.rotation_matrix(180, viewing_dir, deg=True)
    new_cam_xyz = transforms.apply44(rot2.dot(rot),
                                     camera_xyz[None, :]).ravel()
    new_up = transforms.apply44(rot, up[None, :]).ravel()
    Rt_list.append(
        transforms.lookat_matrix(cam_xyz=new_cam_xyz,
                                 obj_xyz=object_xyz,
                                 up=new_up))

    return Rt_list
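
For reference, a minimal usage sketch with hypothetical values, assuming this module's numpy and transforms imports: a camera two units in front of the origin with +y up yields the six canonical world-to-camera matrices.

# Hypothetical usage of make_six_views defined above.
Rt_list = make_six_views(camera_xyz=(0, 0, 2),
                         object_xyz=(0, 0, 0),
                         up=(0, 1, 0))
assert len(Rt_list) == 6  # front, back, left, right, top, bottom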
Example #3
def get_Rt_from_RenderForCNN_parameters(params):
    """
    Computes a camera matrix that transforms model coordinates to Blender's camera coordinates, given RenderForCNN camera parameters.

    Transformation steps in Blender:
        (model coordinates, Blender import config) -> Blender world coordinates
        (Blender world coordinates, params) -> Blender camera coordinates

    This function combines both steps:
        (model coordinates, params) -> Blender camera coordinates.

    :param params: azimuth, elevation, tilt, distance from origin. All angles are in degrees.
    :return: (3, 4) camera matrix.
    """
    assert len(params) == 4
    assert not isinstance(params[0], (np.ndarray, list, tuple))

    # Same ordering used in RenderForCNN.
    azimuth_deg, elevation_deg, theta_deg, rho = params

    cam_xyz = transforms.sph_to_xyz([rho, 90 - elevation_deg, azimuth_deg],
                                    is_input_radians=False)

    # Blender's default OBJ import config:
    #   up (z): +y,  forward (y): -z
    cam_xyz[1], cam_xyz[2] = cam_xyz[2], -cam_xyz[1]

    # In model coordinates, the default up vector is +y.
    # The "lookat" target is assumed to be the origin (0, 0, 0).
    M0 = transforms.lookat_matrix(cam_xyz=cam_xyz,
                                  obj_xyz=(0, 0, 0),
                                  up=(0, 1, 0))

    # Promote the (3, 4) lookat matrix to a homogeneous (4, 4) matrix.
    M = np.eye(4)
    M[:3, :] = M0
    M0 = M

    T_tilt = transforms.rotation_matrix(-theta_deg,
                                        direction=np.array((0, 0, 1)),
                                        deg=True)

    # Rt: move camera then tilt in camera space.
    M = T_tilt.dot(M0)
    Rt = M[:3]

    assert Rt.shape == (3, 4)
    assert Rt.dtype == np.float64

    return Rt
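
A minimal usage sketch under the docstring's conventions; the parameter values below are illustrative, not from the original code.

# Hypothetical RenderForCNN parameters: azimuth 30, elevation 20,
# tilt 0 (all in degrees), camera 2 units from the origin.
Rt = get_Rt_from_RenderForCNN_parameters([30.0, 20.0, 0.0, 2.0])
assert Rt.shape == (3, 4)  # world-to-camera [R | t]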
Example #4
def cam_Rt(self):
    Rt = transforms.lookat_matrix(cam_xyz=self.cam_eye(),
                                  obj_xyz=self.cam_lookat(),
                                  up=self.cam_up())
    return Rt
Example #5
def main():
    syn_images_dir = '/data/mvshape/shapenetcore/single_rgb_128/'
    shapenetcore_dir = '/data/shapenetcore/ShapeNetCore.v1/'

    log.info('Getting all filenames.')
    syndirs = sorted(glob.glob(path.join(syn_images_dir, '*')))
    filenames = []
    for syndir in syndirs:
        modeldirs = sorted(glob.glob(path.join(syndir, '*')))
        if is_subset:  # Module-level flag for building a small debug subset.
            modeldirs = modeldirs[:10]
        for modeldir in modeldirs:
            renderings = sorted(glob.glob(path.join(modeldir, '*.png')))
            if is_subset:
                renderings = renderings[:7]
            filenames.extend(renderings)

    # random.seed(42)
    # if not is_subset:
    #     random.shuffle(filenames)
    #     filenames = filenames[:1000000]

    random.seed(42)

    log.info('{} files'.format(len(filenames)))

    # TODO
    target_dir = '/data/mvshape/database'

    if is_subset:
        sqlite_file_path = join(target_dir, 'shapenetcore_subset.sqlite')
    else:
        sqlite_file_path = join(target_dir, 'shapenetcore.sqlite')
    output_cam_distance_from_origin = 2

    log.info('Setting up output directory.')
    # Remove any stale database file and make sure the target directory exists.
    if path.isfile(sqlite_file_path):
        os.remove(sqlite_file_path)
    io_utils.ensure_dir_exists(target_dir)

    # Used to make sure there are no duplicate rendering names.
    duplicate_name_check_set = set()

    log.info('Checking for duplicates and making sure params.txt exists.')
    for i, filename in enumerate(filenames):
        m = re.search(r'single_rgb_128/(.*?)/(.*?)/[^_]+?_[^_]+?_v(\d{4,})_a',
                      filename)
        synset = m.group(1)
        model_name = m.group(2)
        v = m.group(3)
        image_num = int(v)
        vc_rendering_name = '{}_{}_{:04d}'.format(synset, model_name,
                                                  image_num)
        if vc_rendering_name in duplicate_name_check_set:
            print('duplicate found: ', (filename, vc_rendering_name))
        duplicate_name_check_set.add(vc_rendering_name)
        params_filename = join(syn_images_dir, synset, model_name,
                               'params.txt')
        assert path.isfile(params_filename)

    # Create the database
    dbm.init(sqlite_file_path)

    with dbm.db.transaction() as txn:
        log.info('Creating common objects.')
        make_dataset('shapenetcore')

        make_rendering_type('rgb')
        make_rendering_type('depth')
        make_rendering_type('normal')
        make_rendering_type('voxels')

        make_tag('novelview')
        make_tag('novelmodel')
        make_tag('novelclass')
        make_tag('perspective_input')
        make_tag('orthographic_input')
        make_tag('perspective_output')
        make_tag('orthographic_output')
        make_tag('viewer_centered')
        make_tag('object_centered')
        make_tag('real_world')

        make_split('train')
        make_split('test')
        make_split('validation')

        # Quote from http://shapenet.cs.stanford.edu/shapenet/obj-zip/ShapeNetCore.v1/README.txt
        #   "The OBJ files have been pre-aligned so that the up direction is the +Y axis, and the front is the +X axis.  In addition each model is normalized to fit within a unit cube centered at the origin."
        oc_output_cam = camera.OrthographicCamera.from_Rt(
            transforms.lookat_matrix(cam_xyz=(0, 0,
                                              output_cam_distance_from_origin),
                                     obj_xyz=(0, 0, 0),
                                     up=(0, 1, 0)),
            wh=(128, 128),
            is_world_to_cam=True)
        db_oc_output_cam = get_db_camera(oc_output_cam, fov=None)

        # Prepare all category objects.
        log.info('Preparing categories.')
        synset_db_category_map = {}
        for synset, synset_name in synset_name_pairs:
            db_category_i, _ = dbm.Category.get_or_create(name=synset_name)
            synset_db_category_map[synset] = db_category_i

        txn.commit()

        # Prepare all mesh model objects.
        # ---------------------------------------------
        db_object_map = {}
        # model_name -> {rendering_type_name -> rendering}
        db_object_centered_renderings = {}
        log.info('Preparing mesh model objects.')
        start_time = time.time()
        count = 0
        for i, filename in enumerate(filenames):
            m = re.search(
                r'single_rgb_128/(.*?)/(.*?)/[^_]+?_[^_]+?_v(\d{4,})_a',
                filename)
            synset = m.group(1)
            model_name = m.group(2)
            if model_name not in db_object_map:
                mesh_filename = join(shapenetcore_dir, synset, model_name,
                                     'model.obj')
                assert path.isfile(mesh_filename)

                mesh_filename_suffix = join(
                    '/mesh/shapenetcore/v1',
                    '/'.join(mesh_filename.split('/')[-3:]))

                db_category = synset_db_category_map[synset]
                # Must be unique.
                db_object = dbm.Object.create(
                    name=model_name,
                    category=db_category,
                    dataset=datasets['shapenetcore'],
                    num_vertices=0,  # Not needed for now. Easy to fill in later.
                    num_faces=0,
                    mesh_filename=mesh_filename_suffix,
                )
                db_object_map[model_name] = db_object

                oc_rendering_name = '{}_{}'.format(synset, model_name)

                assert model_name not in db_object_centered_renderings
                db_object_centered_renderings[model_name] = {
                    'output_rgb':
                    dbm.ObjectRendering.create(
                        type=rendering_types['rgb'],
                        camera=db_oc_output_cam,
                        object=db_object,
                        # JPG
                        filename='/shapenetcore/mv20_rgb_128/{}.bin'.format(
                            oc_rendering_name),
                        resolution=128,
                        num_channels=3,
                        set_size=20,
                        is_normalized=False,
                    ),
                    'output_depth':
                    dbm.ObjectRendering.create(
                        type=rendering_types['depth'],
                        camera=db_oc_output_cam,
                        object=db_object,
                        # Since there is only one gt rendering per model, their id is the same as the model name.
                        filename='/shapenetcore/mv20_depth_128/{}.bin'.format(
                            oc_rendering_name),
                        resolution=128,
                        num_channels=1,
                        set_size=20,
                        is_normalized=False,
                    ),
                    'output_normal':
                    dbm.ObjectRendering.create(
                        type=rendering_types['normal'],
                        camera=db_oc_output_cam,
                        object=db_object,
                        filename='/shapenetcore/mv20_normal_128/{}.bin'.format(
                            oc_rendering_name),
                        resolution=128,
                        num_channels=3,
                        set_size=20,
                        is_normalized=False,
                    ),
                    'output_voxels':
                    dbm.ObjectRendering.create(
                        type=rendering_types['voxels'],
                        camera=db_oc_output_cam,
                        object=db_object,
                        filename='/shapenetcore/voxels_32/{}.bin'.format(
                            oc_rendering_name),
                        resolution=32,
                        num_channels=1,
                        set_size=1,
                        is_normalized=False,
                    )
                }

                if count % 5000 == 0:
                    txn.commit()
                    t_elapsed = (time.time() - start_time)
                    t_remaining = (t_elapsed / (i + 1) * (len(filenames) - i))
                    log.info(
                        'Creating mesh objects in db. {} of {}. elapsed: {:.1f} min, remaining: {:.1f} min'
                        .format(i, len(filenames), t_elapsed / 60,
                                t_remaining / 60))

                count += 1
        txn.commit()
        t_elapsed = time.time() - start_time
        log.info('created {} mesh objects in db. elapsed: {:.1f} min'.format(
            count, t_elapsed / 60))

        start_time = time.time()

        log.info('Processing rgb images.')
        for i, filename in enumerate(filenames):
            m = re.search(
                r'single_rgb_128/(.*?)/(.*?)/[^_]+?_[^_]+?_v(\d{4,})_a',
                filename)
            synset = m.group(1)
            model_name = m.group(2)
            v = m.group(3)
            image_num = int(v)

            params_filename = join(syn_images_dir, synset, model_name,
                                   'params.txt')
            assert path.isfile(params_filename)

            lines = render_for_cnn_utils.read_params_file(params_filename)
            Rt = render_for_cnn_utils.get_Rt_from_RenderForCNN_parameters(
                lines[image_num])

            # Input and output cameras
            # -------------------
            input_cam = camera.OrthographicCamera.from_Rt(Rt,
                                                          wh=(128, 128),
                                                          is_world_to_cam=True)
            # 49.1343 degrees is the default fov in blender.
            db_input_cam = get_db_camera(input_cam, fov=49.1343)

            input_cam_depth_xyz = (input_cam.pos /
                                   la.norm(input_cam.pos)) * 1.5
            input_cam_depth_Rt = transforms.lookat_matrix(
                cam_xyz=input_cam_depth_xyz,
                obj_xyz=(0, 0, 0),
                up=input_cam.up_vector)
            input_cam_depth = camera.OrthographicCamera.from_Rt(
                input_cam_depth_Rt, wh=(128, 128), is_world_to_cam=True)
            db_input_cam_depth = get_db_camera(input_cam_depth, fov=49.1343)

            output_cam_xyz = (input_cam.pos / la.norm(
                input_cam.pos)) * output_cam_distance_from_origin
            output_Rt = transforms.lookat_matrix(cam_xyz=output_cam_xyz,
                                                 obj_xyz=(0, 0, 0),
                                                 up=input_cam.up_vector)
            vc_output_cam = camera.OrthographicCamera.from_Rt(
                output_Rt, wh=(128, 128), is_world_to_cam=True)
            db_vc_output_cam = get_db_camera(vc_output_cam, fov=None)

            # ---
            db_object = db_object_map[model_name]

            vc_rendering_name = '{}_{}_{:04d}'.format(synset, model_name,
                                                      image_num)

            # Viewer centered renderings.
            # --------------------------------

            # Input rgb image:
            db_object_rendering_input_rgb = dbm.ObjectRendering.create(
                type=rendering_types['rgb'],
                camera=db_input_cam,
                object=db_object,
                # This should already exist.
                filename='/shapenetcore/single_rgb_128/{}.png'.format(
                    vc_rendering_name),
                resolution=128,
                num_channels=1,
                set_size=1,
                is_normalized=False,  # False for rgb.
            )

            db_object_rendering_input_depth = dbm.ObjectRendering.create(
                type=rendering_types['depth'],
                camera=db_input_cam_depth,
                object=db_object,
                filename='/shapenetcore/single_depth_128/{}.bin'.format(
                    vc_rendering_name),
                resolution=128,
                num_channels=1,
                set_size=1,
                is_normalized=True,
            )

            db_object_rendering_vc_output_rgb = dbm.ObjectRendering.create(
                type=rendering_types['rgb'],
                camera=db_vc_output_cam,
                object=db_object,
                filename='/shapenetcore/mv20_rgb_128/{}.bin'.format(
                    vc_rendering_name),
                resolution=128,
                num_channels=3,
                set_size=20,
                is_normalized=False,
            )

            db_object_rendering_vc_output_depth = dbm.ObjectRendering.create(
                type=rendering_types['depth'],
                camera=db_vc_output_cam,
                object=db_object,
                filename='/shapenetcore/mv20_depth_128/{}.bin'.format(
                    vc_rendering_name),
                resolution=128,
                num_channels=1,
                set_size=20,
                is_normalized=False,
            )

            db_object_rendering_vc_output_normal = dbm.ObjectRendering.create(
                type=rendering_types['normal'],
                camera=db_vc_output_cam,
                object=db_object,
                filename='/shapenetcore/mv20_normal_128/{}.bin'.format(
                    vc_rendering_name),
                resolution=128,
                num_channels=3,
                set_size=20,
                is_normalized=False,
            )

            db_object_rendering_vc_output_voxels = dbm.ObjectRendering.create(
                type=rendering_types['voxels'],
                camera=db_vc_output_cam,
                object=db_object,
                filename='/shapenetcore/voxels_32/{}.bin'.format(
                    vc_rendering_name),
                resolution=32,
                num_channels=1,
                set_size=1,
                is_normalized=False,
            )

            # Examples
            # ----------------

            # A row in the `Example` table is just an id for many-to-many references.

            # View centered
            example_viewer_centered = dbm.Example.create()
            dbm.ExampleObjectRendering.create(
                example=example_viewer_centered,
                rendering=db_object_rendering_input_rgb)
            dbm.ExampleObjectRendering.create(
                example=example_viewer_centered,
                rendering=db_object_rendering_input_depth)
            dbm.ExampleObjectRendering.create(
                example=example_viewer_centered,
                rendering=db_object_rendering_vc_output_depth)
            dbm.ExampleObjectRendering.create(
                example=example_viewer_centered,
                rendering=db_object_rendering_vc_output_normal)
            dbm.ExampleObjectRendering.create(
                example=example_viewer_centered,
                rendering=db_object_rendering_vc_output_rgb)
            dbm.ExampleObjectRendering.create(
                example=example_viewer_centered,
                rendering=db_object_rendering_vc_output_voxels)
            dbm.ExampleDataset.create(example=example_viewer_centered,
                                      dataset=datasets['shapenetcore'])
            dbm.ExampleSplit.create(example=example_viewer_centered,
                                    split=splits['train'])
            dbm.ExampleTag.create(example=example_viewer_centered,
                                  tag=tags['real_world'])
            dbm.ExampleTag.create(example=example_viewer_centered,
                                  tag=tags['viewer_centered'])
            dbm.ExampleTag.create(example=example_viewer_centered,
                                  tag=tags['perspective_input'])
            dbm.ExampleTag.create(example=example_viewer_centered,
                                  tag=tags['orthographic_output'])
            dbm.ExampleTag.create(example=example_viewer_centered,
                                  tag=tags['novelmodel'])

            # Object centered
            example_object_centered = dbm.Example.create()
            dbm.ExampleObjectRendering.create(
                example=example_object_centered,
                rendering=db_object_rendering_input_rgb)
            dbm.ExampleObjectRendering.create(
                example=example_object_centered,
                rendering=db_object_rendering_input_depth)
            dbm.ExampleObjectRendering.create(
                example=example_object_centered,
                rendering=db_object_centered_renderings[model_name]
                ['output_depth'])
            dbm.ExampleObjectRendering.create(
                example=example_object_centered,
                rendering=db_object_centered_renderings[model_name]
                ['output_normal'])
            dbm.ExampleObjectRendering.create(
                example=example_object_centered,
                rendering=db_object_centered_renderings[model_name]
                ['output_rgb'])
            dbm.ExampleObjectRendering.create(
                example=example_object_centered,
                rendering=db_object_centered_renderings[model_name]
                ['output_voxels'])
            dbm.ExampleDataset.create(example=example_object_centered,
                                      dataset=datasets['shapenetcore'])
            dbm.ExampleSplit.create(example=example_object_centered,
                                    split=splits['train'])
            dbm.ExampleTag.create(example=example_object_centered,
                                  tag=tags['real_world'])
            dbm.ExampleTag.create(example=example_object_centered,
                                  tag=tags['object_centered'])
            dbm.ExampleTag.create(example=example_object_centered,
                                  tag=tags['perspective_input'])
            dbm.ExampleTag.create(example=example_object_centered,
                                  tag=tags['orthographic_output'])
            dbm.ExampleTag.create(example=example_object_centered,
                                  tag=tags['novelmodel'])

            if i % 5000 == 0:
                txn.commit()
                t_elapsed = (time.time() - start_time)
                t_remaining = (t_elapsed / (i + 1) * (len(filenames) - i))
                log.info(
                    'Creating examples in db. {} of {}. elapsed: {:.1f} min, remaining: {:.1f} min'
                    .format(i, len(filenames), t_elapsed / 60,
                            t_remaining / 60))
        txn.commit()

    dbm.db.commit()

    t_elapsed = (time.time() - start_time)
    log.info('total elapsed: {:.1f} min'.format(t_elapsed / 60))
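
The filename regex above does the heavy lifting in both passes of main(). A self-contained sketch of what it extracts, using a made-up path in the layout the script expects:

import re

# Hypothetical path; only the trailing layout matters to the regex.
fname = ('/data/mvshape/shapenetcore/single_rgb_128/'
         '02691156/abc123/02691156_abc123_v0042_a030_e20_t0_d2.png')
m = re.search(r'single_rgb_128/(.*?)/(.*?)/[^_]+?_[^_]+?_v(\d{4,})_a', fname)
synset, model_name, image_num = m.group(1), m.group(2), int(m.group(3))
print(synset, model_name, image_num)  # 02691156 abc123 42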
Example #6
def main():
    with open('/data/mvshape/pascal3d/eval_1500.json', 'r') as f:
        eval_1500 = json.load(f)

    with open('/data/mvshape/pascal3d/validation_1200.json', 'r') as f:
        validation_1200 = json.load(f)

    validation_matfiles = [fname for l in validation_1200.values()
                           for fname in l]
    test_matfiles = [fname for l in eval_1500.values() for fname in l]

    database_filename = '/data/mvshape/database/pascal3d.sqlite'
    if path.isfile(database_filename):
        os.remove(database_filename)
    dbm.init(database_filename)

    with dbm.db.transaction() as txn:
        make_dataset('pascal3d')
        make_rendering_type('rgb')
        make_rendering_type('depth')
        make_rendering_type('voxels')
        make_rendering_type('normal')
        make_tag('novelview')
        make_tag('novelmodel')
        make_tag('novelclass')
        make_tag('perspective_input')
        make_tag('orthographic_input')
        make_tag('perspective_output')
        make_tag('orthographic_output')
        make_tag('viewer_centered')
        make_tag('object_centered')
        make_tag('real_world')
        make_split('test')
        make_split('validation')

        # Quote from http://shapenet.cs.stanford.edu/shapenet/obj-zip/ShapeNetCore.v1/README.txt
        #   "The OBJ files have been pre-aligned so that the up direction is the +Y axis, and the front is the +X axis.  In addition each model is normalized to fit within a unit cube centered at the origin."
        oc_output_cam = camera.OrthographicCamera.from_Rt(transforms.lookat_matrix(cam_xyz=(0, 0, output_cam_distance_from_origin),
                                                                                   obj_xyz=(0, 0, 0),
                                                                                   up=(0, 1, 0)),
                                                          wh=(128, 128),
                                                          is_world_to_cam=True)
        db_oc_output_cam = get_db_camera(oc_output_cam, fov=None)
        txn.commit()

        all_matfiles = test_matfiles + validation_matfiles

        # populate categories
        for matfile in all_matfiles:
            anno = load_annotation_object(matfile)
            if anno.category_name in categories:
                continue
            db_category_i, _ = dbm.Category.get_or_create(name=anno.category_name)
            categories[anno.category_name] = db_category_i

        for matfile in all_matfiles:
            anno = load_annotation_object(matfile)
            model_name = '{}_{:02d}'.format(anno.category_name, anno.cad_index)
            if model_name in db_object_map:
                continue

            new_mesh_filename_db = '/mesh/pascal3d/{}.off'.format(model_name)
            new_mesh_filename = make_fullpath(new_mesh_filename_db)

            if not path.isfile(new_mesh_filename):
                io_utils.save_off(anno.fv(), new_mesh_filename)

            db_object = dbm.Object.create(
                name=model_name,
                category=categories[anno.category_name],
                dataset=datasets['pascal3d'],
                num_vertices=0,  # Not needed for now. Easy to fill in later.
                num_faces=0,
                mesh_filename=new_mesh_filename_db,
            )
            db_object_map[model_name] = db_object

            oc_rendering_name = model_name

            assert model_name not in db_object_centered_renderings
            db_object_centered_renderings[model_name] = {
                'output_rgb': dbm.ObjectRendering.create(
                    type=rendering_types['rgb'],
                    camera=db_oc_output_cam,
                    object=db_object,
                    # JPG
                    filename='/pascal3d/mv6_rgb_128/{}.bin'.format(oc_rendering_name),
                    resolution=128,
                    num_channels=3,
                    set_size=6,
                    is_normalized=False,
                ),
                'output_depth': dbm.ObjectRendering.create(
                    type=rendering_types['depth'],
                    camera=db_oc_output_cam,
                    object=db_object,
                    # Since there is only one gt rendering per model, their id is the same as the model name.
                    filename='/pascal3d/mv6_depth_128/{}.bin'.format(oc_rendering_name),
                    resolution=128,
                    num_channels=1,
                    set_size=6,
                    is_normalized=False,
                ),
                'output_normal': dbm.ObjectRendering.create(
                    type=rendering_types['normal'],
                    camera=db_oc_output_cam,
                    object=db_object,
                    filename='/pascal3d/mv6_normal_128/{}.bin'.format(oc_rendering_name),
                    resolution=128,
                    num_channels=3,
                    set_size=6,
                    is_normalized=False,
                ),
                'output_voxels': dbm.ObjectRendering.create(
                    type=rendering_types['voxels'],
                    camera=db_oc_output_cam,
                    object=db_object,
                    filename='/pascal3d/voxels_32/{}.bin'.format(oc_rendering_name),
                    resolution=32,
                    num_channels=1,
                    set_size=1,
                    is_normalized=False,
                )
            }

        txn.commit()

        for matfile in all_matfiles:
            anno = load_annotation_object(matfile)  # type: pascal.PascalAnnotation
            model_name = '{}_{:02d}'.format(anno.category_name, anno.cad_index)
            input_cam = anno.cam_object()
            # 49.1343 degrees is the default fov in blender.
            db_input_cam = get_db_camera(input_cam, fov=49.1343)

            input_cam_depth_xyz = (input_cam.pos / la.norm(input_cam.pos)) * 1.5
            input_cam_depth_Rt = transforms.lookat_matrix(cam_xyz=input_cam_depth_xyz,
                                                          obj_xyz=(0, 0, 0),
                                                          up=input_cam.up_vector)
            input_cam_depth = camera.OrthographicCamera.from_Rt(input_cam_depth_Rt, wh=(128, 128), is_world_to_cam=True)
            db_input_cam_depth = get_db_camera(input_cam_depth, fov=49.1343)

            output_cam_xyz = (input_cam.pos / la.norm(input_cam.pos)) * output_cam_distance_from_origin
            output_Rt = transforms.lookat_matrix(cam_xyz=output_cam_xyz,
                                                 obj_xyz=(0, 0, 0),
                                                 up=input_cam.up_vector)
            vc_output_cam = camera.OrthographicCamera.from_Rt(output_Rt, wh=(128, 128), is_world_to_cam=True)
            db_vc_output_cam = get_db_camera(vc_output_cam, fov=None)

            # ---
            db_object = db_object_map[model_name]

            # e.g. aeroplane_01_n03693474_7232
            vc_rendering_name = '{}_{}'.format(model_name, anno.jpg_filename.split('.')[0])

            # Input rgb image:
            db_object_rendering_input_rgb = dbm.ObjectRendering.create(
                type=rendering_types['rgb'],
                camera=db_input_cam,
                object=db_object,
                # This should already exist.
                filename='/pascal3d/single_rgb_128/{}.png'.format(vc_rendering_name),
                resolution=128,
                num_channels=1,
                set_size=1,
                is_normalized=False,  # False for rgb.
            )

            db_object_rendering_input_depth = dbm.ObjectRendering.create(
                type=rendering_types['depth'],
                camera=db_input_cam_depth,
                object=db_object,
                filename='/pascal3d/single_depth_128/{}.bin'.format(vc_rendering_name),
                resolution=128,
                num_channels=1,
                set_size=1,
                is_normalized=True,
            )

            db_object_rendering_vc_output_rgb = dbm.ObjectRendering.create(
                type=rendering_types['rgb'],
                camera=db_vc_output_cam,
                object=db_object,
                filename='/pascal3d/mv6_rgb_128/{}.bin'.format(vc_rendering_name),
                resolution=128,
                num_channels=3,
                set_size=6,
                is_normalized=False,
            )

            db_object_rendering_vc_output_depth = dbm.ObjectRendering.create(
                type=rendering_types['depth'],
                camera=db_vc_output_cam,
                object=db_object,
                filename='/pascal3d/mv6_depth_128/{}.bin'.format(vc_rendering_name),
                resolution=128,
                num_channels=1,
                set_size=6,
                is_normalized=False,
            )

            db_object_rendering_vc_output_normal = dbm.ObjectRendering.create(
                type=rendering_types['normal'],
                camera=db_vc_output_cam,
                object=db_object,
                filename='/pascal3d/mv6_normal_128/{}.bin'.format(vc_rendering_name),
                resolution=128,
                num_channels=3,
                set_size=6,
                is_normalized=False,
            )

            db_object_rendering_vc_output_voxels = dbm.ObjectRendering.create(
                type=rendering_types['voxels'],
                camera=db_vc_output_cam,
                object=db_object,
                filename='/pascal3d/voxels_32/{}.bin'.format(vc_rendering_name),
                resolution=32,
                num_channels=1,
                set_size=1,
                is_normalized=False,
            )

            # Examples
            # ----------------

            is_test = matfile in test_matfiles
            is_validation = matfile in validation_matfiles
            assert is_test ^ is_validation

            if is_test:
                split = splits['test']
            elif is_validation:
                split = splits['validation']
            else:
                raise NotImplementedError()

            # A row in the `Example` table is just an id for many-to-many references.

            # View centered
            example_viewer_centered = dbm.Example.create()
            dbm.ExampleObjectRendering.create(example=example_viewer_centered, rendering=db_object_rendering_input_rgb)
            dbm.ExampleObjectRendering.create(example=example_viewer_centered, rendering=db_object_rendering_input_depth)
            dbm.ExampleObjectRendering.create(example=example_viewer_centered, rendering=db_object_rendering_vc_output_depth)
            dbm.ExampleObjectRendering.create(example=example_viewer_centered, rendering=db_object_rendering_vc_output_normal)
            dbm.ExampleObjectRendering.create(example=example_viewer_centered, rendering=db_object_rendering_vc_output_rgb)
            dbm.ExampleObjectRendering.create(example=example_viewer_centered, rendering=db_object_rendering_vc_output_voxels)
            dbm.ExampleDataset.create(example=example_viewer_centered, dataset=datasets['pascal3d'])
            dbm.ExampleSplit.create(example=example_viewer_centered, split=split)
            dbm.ExampleTag.create(example=example_viewer_centered, tag=tags['real_world'])
            dbm.ExampleTag.create(example=example_viewer_centered, tag=tags['viewer_centered'])
            dbm.ExampleTag.create(example=example_viewer_centered, tag=tags['perspective_input'])
            dbm.ExampleTag.create(example=example_viewer_centered, tag=tags['orthographic_output'])
            dbm.ExampleTag.create(example=example_viewer_centered, tag=tags['novelmodel'])

            # Object centered
            example_object_centered = dbm.Example.create()
            dbm.ExampleObjectRendering.create(example=example_object_centered, rendering=db_object_rendering_input_rgb)
            dbm.ExampleObjectRendering.create(example=example_object_centered, rendering=db_object_rendering_input_depth)
            dbm.ExampleObjectRendering.create(example=example_object_centered, rendering=db_object_centered_renderings[model_name]['output_depth'])
            dbm.ExampleObjectRendering.create(example=example_object_centered, rendering=db_object_centered_renderings[model_name]['output_normal'])
            dbm.ExampleObjectRendering.create(example=example_object_centered, rendering=db_object_centered_renderings[model_name]['output_rgb'])
            dbm.ExampleObjectRendering.create(example=example_object_centered, rendering=db_object_centered_renderings[model_name]['output_voxels'])
            dbm.ExampleDataset.create(example=example_object_centered, dataset=datasets['pascal3d'])
            dbm.ExampleSplit.create(example=example_object_centered, split=split)
            dbm.ExampleTag.create(example=example_object_centered, tag=tags['real_world'])
            dbm.ExampleTag.create(example=example_object_centered, tag=tags['object_centered'])
            dbm.ExampleTag.create(example=example_object_centered, tag=tags['perspective_input'])
            dbm.ExampleTag.create(example=example_object_centered, tag=tags['orthographic_output'])
            dbm.ExampleTag.create(example=example_object_centered, tag=tags['novelmodel'])

        txn.commit()

    dbm.db.commit()
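
Both scripts rely on the same schema pattern: an `Example` row is a bare id, and join tables such as `ExampleObjectRendering`, `ExampleTag`, `ExampleDataset` and `ExampleSplit` attach renderings, tags, datasets and splits to it. The `dbm` API (create, get_or_create, db.transaction) looks like peewee; below is a minimal sketch of the pattern under that assumption, with hypothetical field names that may differ from the real `dbm` module.

import peewee as pw

db = pw.SqliteDatabase(':memory:')

class Example(pw.Model):
    # A bare row; everything attaches to its auto-generated id.
    class Meta:
        database = db

class Tag(pw.Model):
    name = pw.CharField(unique=True)
    class Meta:
        database = db

class ExampleTag(pw.Model):
    # Many-to-many join table between Example and Tag.
    example = pw.ForeignKeyField(Example)
    tag = pw.ForeignKeyField(Tag)
    class Meta:
        database = db

db.create_tables([Example, Tag, ExampleTag])
tag, _ = Tag.get_or_create(name='viewer_centered')
example = Example.create()
ExampleTag.create(example=example, tag=tag)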