def vtk_raycasting_bsptree():
    """Testing out raycasting inside of VTK using vtkModifiedBSPTree
    """
    radius = 0.01

    # read the OBJ and turn it into polydata
    polydata = wavefront.read_obj_to_polydata('./data/shape_model/ITOKAWA/itokawa_high.obj')

    renderer = vtk.vtkRenderer()

    pSource = np.array([1, 0, 0])
    pTarget = np.array([0.00372, -0.0609, -0.0609])

    # build a modified BSP tree locator over the polydata
    bspTree = vtk.vtkModifiedBSPTree()
    bspTree.SetDataSet(polydata)
    bspTree.BuildLocator()

    # intersect the segment from pSource to pTarget with the mesh
    pointsVTKintersection = vtk.vtkPoints()
    code = bspTree.IntersectWithLine(pSource, pTarget, 1e-9,
                                     pointsVTKintersection, None)

    if code:
        points_int = numpy_support.vtk_to_numpy(pointsVTKintersection.GetData())
        for p in points_int:
            graphics.vtk_addPoint(renderer, p, color=[0, 0, 1], radius=radius)
        print('{} intersections'.format(points_int.shape[0]))

    graphics.vtk_addPoly(renderer, polydata)
    graphics.vtk_addPoint(renderer, pSource, color=[0, 1.0, 0], radius=radius)
    graphics.vtk_addPoint(renderer, pTarget, color=[1.0, 0, 0], radius=radius)
    graphics.vtk_addLine(renderer, pSource, pTarget)

    graphics.vtk_show(renderer)
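# For comparison, a minimal sketch of the same segment intersection using
# VTK's vtkOBBTree locator instead of vtkModifiedBSPTree. This helper is an
# illustrative assumption, not part of the original script; it expects the
# same polydata, pSource, and pTarget as vtk_raycasting_bsptree above.
import vtk
from vtk.util import numpy_support


def vtk_raycasting_obbtree_sketch(polydata, pSource, pTarget):
    """Sketch: intersect a line segment with polydata via vtkOBBTree"""
    # build an OBB-tree locator over the polydata
    obbTree = vtk.vtkOBBTree()
    obbTree.SetDataSet(polydata)
    obbTree.BuildLocator()

    # vtkOBBTree takes no tolerance; cell ids are not needed here, so pass None
    pointsVTKintersection = vtk.vtkPoints()
    code = obbTree.IntersectWithLine(pSource, pTarget, pointsVTKintersection, None)

    if code:
        points_int = numpy_support.vtk_to_numpy(pointsVTKintersection.GetData())
        print('{} intersections'.format(points_int.shape[0]))
        return points_int

    return None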
class TestPolyhedronPlotting():
    polydata = wavefront.read_obj_to_polydata('./data/shape_model/EROS/eros_medium.obj')

    def test_draw_vtk_polyhedron(self):
        renderer = graphics.vtk_renderer()
        graphics.vtk_addPoly(renderer, self.polydata)
        graphics.vtk_show(renderer)

    def test_draw_mayavi_polyhedron(self):
        fig = graphics.mayavi_figure()
        mesh = graphics.mayavi_addPoly(fig, self.polydata)
class TestCompareVTKAndWavefrontWritingOBJ():
    """Here we test that we can write OBJ files using both the VTK and Wavefront modules
    """
    filename = './data/shape_model/ITOKAWA/itokawa_low.obj'
    vtkPolyData = wavefront.read_obj_to_polydata(filename)
    input_vertices, input_faces = wavefront.polydatatomesh(vtkPolyData)

    # write the OBJ file using both VTK and Wavefront
    wavefront.write_vtkPolyData(vtkPolyData, '/tmp/vtk_output')
    wavefront.write_obj(input_vertices, input_faces, '/tmp/wavefront_output.obj')

    # now read back our two files
    polydata_vtk_output = wavefront.read_obj_to_polydata('/tmp/vtk_output.obj')
    vtk_output_vertices, vtk_output_faces = wavefront.polydatatomesh(
        polydata_vtk_output)

    polydata_wavefront_output = wavefront.read_obj_to_polydata(
        '/tmp/wavefront_output.obj')
    wavefront_output_vertices, wavefront_output_faces = wavefront.polydatatomesh(
        polydata_wavefront_output)

    def test_number_vertices(self):
        np.testing.assert_allclose(self.vtk_output_vertices.shape,
                                   self.wavefront_output_vertices.shape)

    def test_number_of_faces(self):
        np.testing.assert_allclose(self.vtk_output_faces.shape,
                                   self.wavefront_output_faces.shape)

    def test_vertices_equal(self):
        np.testing.assert_allclose(self.vtk_output_vertices,
                                   self.wavefront_output_vertices)

    def test_faces_equal(self):
        np.testing.assert_allclose(self.vtk_output_faces,
                                   self.wavefront_output_faces)
def castalia_raycasting_plot(img_path):
    """Generate an image of the raycaster at work
    """
    if not os.path.exists(img_path):
        os.makedirs(img_path)

    output_filename = os.path.join(img_path, 'castalia_raycasting_plot.jpg')
    input_filename = './data/shape_model/CASTALIA/castalia.obj'

    polydata = wavefront.read_obj_to_polydata(input_filename)

    # initialize the raycasters
    caster_obb = raycaster.RayCaster(polydata, flag='obb')
    caster_bsp = raycaster.RayCaster(polydata, flag='bsp')

    sensor = raycaster.Lidar(view_axis=np.array([1, 0, 0]), num_step=3)

    # need to translate the sensor and give it a pointing direction
    pos = np.array([2, 0, 0])
    dist = 2  # distance for each raycast
    R = attitude.rot3(np.pi)

    # find the intersections
    # targets = pos + sensor.rotate_fov(R) * dist
    targets = sensor.define_targets(pos, R, dist)
    intersections_obb = caster_obb.castarray(pos, targets)
    intersections_bsp = caster_bsp.castarray(pos, targets)

    # plot
    fig = graphics.mayavi_figure()
    graphics.mayavi_addPoly(fig, polydata)
    graphics.mayavi_addPoint(fig, pos, radius=0.05)

    # draw lines from pos to all targets
    for pt in targets:
        graphics.mayavi_addLine(fig, pos, pt, color=(1, 0, 0))

    # mark the BSP tree intersections
    for ints in intersections_bsp:
        graphics.mayavi_addPoint(fig, ints, radius=0.05, color=(1, 1, 0))

    graphics.mayavi_axes(fig=fig, extent=[-1, 1, -1, 1, -1, 1])
    graphics.mayavi_view(fig=fig)

    # save the figure
    graphics.mayavi_savefig(fig=fig, filename=output_filename, magnification=4)
class TestVTKReadingOBJFilesItokawaHigh():
    filename = './data/shape_model/ITOKAWA/itokawa_high.obj'
    vtkPolyData = wavefront.read_obj_to_polydata(filename)
    vtk_vertices, vtk_faces = wavefront.polydatatomesh(vtkPolyData)
    wavefront_verts, wavefront_faces = wavefront.read_obj(filename)

    def test_numpy_vertices_equal(self):
        """Convert both to numpy representation and compare
        """
        np.testing.assert_allclose(self.vtk_vertices, self.wavefront_verts)

    def test_numpy_faces_equal(self):
        """Convert both to numpy representation and compare
        """
        np.testing.assert_allclose(self.vtk_faces, self.wavefront_faces)

    def test_number_of_faces_equal(self):
        np.testing.assert_allclose(self.vtkPolyData.GetNumberOfPolys(),
                                   self.wavefront_faces.shape[0])

    def test_number_of_vertices_equal(self):
        np.testing.assert_allclose(self.vtkPolyData.GetNumberOfPoints(),
                                   self.wavefront_verts.shape[0])
"""Compare speed of raycasting with Python vs. Python C++ Bindings Here we compare C++ raycasting to Python/VTK raycasting Author ------ Shankar Kulumani GWU [email protected] """ from point_cloud import wavefront, raycaster from lib import cgal, mesh_data import numpy as np pos = np.array([5, 0, 0]) target = np.array([0, 0, 0]) filename = './data/shape_model/ITOKAWA/itokawa_very_high.obj' polydata = wavefront.read_obj_to_polydata(filename) caster = raycaster.RayCaster(polydata, flag='bsp') intersection = caster.castray(pos, target) v, f = wavefront.read_obj(filename) mesh = mesh_data.MeshData(v, f) caster_cpp = cgal.RayCaster(mesh) intersection_cpp = caster_cpp.castray(pos, target)