def test_render_preset(self):
  """Verifies that render reproduces the precomputed preset images."""
  voxels_init, images_init = (
      test_helpers.generate_preset_test_voxels_emission_absorption_render())
  # The preset fixtures are converted to tensors before comparison.
  expected_images = tf.convert_to_tensor(value=images_init)
  rendered = emission_absorption.render(
      tf.convert_to_tensor(value=voxels_init),
      absorption_factor=0.1,
      cell_size=0.1)
  self.assertAllClose(expected_images, rendered)
def render_voxels_from_blender_camera(voxels,
                                      object_rotation,
                                      object_translation,
                                      height,
                                      width,
                                      focal,
                                      principal_point,
                                      camera_rotation_matrix,
                                      camera_translation_vector,
                                      frustum_size=(256, 256, 512),
                                      absorption_factor=0.1,
                                      cell_size=1.0,
                                      depth_min=0.0,
                                      depth_max=5.0):
  """Renders the voxels according to their position in the world.

  The voxel grid is rotated into the Blender world frame, resampled into
  the camera frustum, and finally composited into an image with the
  emission-absorption model.

  Args:
    voxels: A tensor of voxels; presumably shaped
      `[batch, depth, height, width, channels]` — confirm against callers.
    object_rotation: Rotation applied to the object in the Blender world.
    object_translation: Translation of the object in the Blender world.
    height: Image height used to build the frustum sampling points.
    width: Image width used to build the frustum sampling points.
    focal: Camera focal length.
    principal_point: Camera principal point.
    camera_rotation_matrix: Rotation of the Blender camera.
    camera_translation_vector: Translation of the Blender camera.
    frustum_size: Resolution of the sampling frustum.
    absorption_factor: Absorption factor of the emission-absorption model.
    cell_size: Cell size of the emission-absorption model.
    depth_min: Near plane of the sampling frustum.
    depth_max: Far plane of the sampling frustum.

  Returns:
    The rendered image produced by `emission_absorption.render`.
  """
  batch_size = voxels.shape[0]
  voxel_size = voxels.shape[1]
  # Sampling points expressed in the camera frustum, then placed at the
  # Blender camera pose.
  frustum_points = sampling_points_from_frustum(
      height,
      width,
      focal,
      principal_point,
      depth_min=depth_min,
      depth_max=depth_max,
      frustum_size=frustum_size)
  frustum_points = place_frustum_sampling_points_at_blender_camera(
      frustum_points, camera_rotation_matrix, camera_translation_vector)
  rotated_voxels = object_rotation_in_blender_world(voxels, object_rotation)
  # Adjust the camera (translate the camera instead of the object) and
  # normalize to the unit cube.
  frustum_points = (frustum_points - object_translation) / CUBE_BOX_DIM
  frustum_points = sampling_points_to_voxel_index(frustum_points, voxel_size)
  camera_voxels = trilinear.interpolate(rotated_voxels, frustum_points)
  camera_voxels = tf.reshape(camera_voxels,
                             [batch_size] + list(frustum_size) + [4])
  return emission_absorption.render(
      camera_voxels,
      absorption_factor=absorption_factor,
      cell_size=cell_size)