def viewImg2(img, spacing, contours):
    """Display *img* as Mayavi iso-surfaces at the given *contours*.

    NOTE(review): *spacing* is accepted but never applied -- the source
    spacing is hard-coded to [1, 1, 1]; confirm this is intentional.
    """
    print("In viewImg2: (min,max)=(%f,%f)" % (img.min(), img.max()))
    print("contours=", contours)
    mlab.figure(bgcolor=(0, 0, 0), size=(400, 400))
    field = mlab.pipeline.scalar_field(img)
    # Our data is not equally spaced in all directions:
    field.spacing = [1, 1, 1]
    field.update_image_data = True
    # Gaussian-smoothed copy of the data.  It is only created here; the
    # iso-surface below is taken from the raw field, not the smoothed one.
    smoothed = mlab.pipeline.user_defined(field, filter='ImageGaussianSmooth')
    mlab.pipeline.iso_surface(field, contours=contours)
    mlab.view(-125, 54, 'auto', 'auto')
    mlab.roll(-175)
    mlab.show()
def viewImg(img, spacing, contours):
    """Gaussian-smooth *img* and display it as a 3D contour plot.

    Parameters
    ----------
    img : array-like
        Volume to render.
    spacing : sequence
        NOTE(review): accepted but never applied -- the source spacing is
        hard-coded to [1, 1, 1]; confirm intent.
    contours : sequence
        NOTE(review): unused here; contour3d chooses its own levels.
    """
    mlab.figure(bgcolor=(0, 0, 0), size=(400, 400))
    src = mlab.pipeline.scalar_field(img)
    # Our data is not equally spaced in all directions:
    src.spacing = [1, 1, 1]
    src.update_image_data = True
    # BUG FIX: the original passed the not-yet-defined name `blur` into
    # user_defined(), raising NameError at call time.  The smoothing filter
    # must be fed the scalar-field source (as viewImg2 does).
    blur = mlab.pipeline.user_defined(src, filter='ImageGaussianSmooth')
    print("blur type is", type(blur), blur.max())
    mlab.pipeline.contour3d(blur)
    mlab.view(-125, 54, 'auto', 'auto')
    mlab.roll(-175)
    mlab.show()
def viewImgWithNodes(img, spacing, contours, g, title=''):
    """Render the nodes and edge paths of graph *g* in a Mayavi scene.

    Nodes are drawn as blue glyphs and each edge's stored ``'path'``
    attribute as a green tube.  Coordinates are plotted in (z, y, x)
    order.  *img*, *spacing* and *contours* are currently unused (the
    iso-surface code is disabled).
    """
    mlab.figure(bgcolor=(0, 0, 0), size=(900, 900))
    node_arr = np.array(g.nodes())
    glyph_sizes = 4 * np.ones(node_arr.shape[0], dtype='float32')
    print(glyph_sizes.shape, node_arr.shape)
    mlab.points3d(node_arr[:, 2], node_arr[:, 1], node_arr[:, 0],
                  glyph_sizes, color=(0.0, 0.0, 1.0), scale_factor=0.25)
    for u, v, attrs in g.edges(data=True):
        # Full polyline: start node, stored intermediate points, end node.
        waypoints = np.array([u] + attrs['path'] + [v])
        mlab.plot3d(waypoints[:, 2], waypoints[:, 1], waypoints[:, 0],
                    color=(0, 1, 0), tube_radius=0.25)
    mlab.view(-125, 54, 'auto', 'auto')
    mlab.roll(-175)
    mlab.title(title, height=0.1)
    mlab.show()
def autoPositionCamera():
    """Point the camera along the x-axis with automatic distance/focus."""
    scene = mlab.gcf().scene
    # Suppress intermediate redraws while the camera is repositioned.
    scene.disable_render = True
    mlab.view(90, 90, distance='auto', focalpoint='auto')
    mlab.roll(0)
    scene.disable_render = False
def __view(self, viewargs=None, roll=None):
    """Wrapper for mlab.view().

    Parameters
    ----------
    viewargs: dict
        mapping with keys corresponding to mlab.view args
    roll: num
        int or float to set camera roll

    Returns
    -------
    camera settings: tuple
        view settings, roll setting
    """
    from enthought.mayavi import mlab
    if viewargs:
        # Copy before injecting reset_roll so the caller's dict is not
        # mutated (the original wrote into the passed-in mapping).
        viewargs = dict(viewargs, reset_roll=True)
        mlab.view(**viewargs)
    if roll is not None:  # idiomatic form of the original `not roll is None`
        mlab.roll(roll)
    return mlab.view(), mlab.roll()
def viewGraph(g, sub=3, title=''):
    """Draw every edge path of graph *g* as a green tube, subsampled by *sub*.

    Parameters
    ----------
    g : graph
        Graph whose edges carry a ``'path'`` attribute (list of points).
    sub : int
        Stride used to subsample each path before plotting.
    title : str
        Figure title.
    """
    mlab.figure(bgcolor=(0, 0, 0), size=(900, 900))
    # FIX: wrap in list() -- networkx >= 2 returns a NodeView, which
    # random.shuffle cannot mutate; list() is also fine for networkx 1.x.
    nodes = list(g.nodes())
    random.shuffle(nodes)
    # NOTE(review): `nodes` is no longer used after this point (the
    # points3d call was removed); kept for behavioral parity.
    nodes = np.array(nodes[0:100])
    edges = g.edges(data=True)
    print(len(edges))
    input('continue')
    count = 0
    for n1, n2, edge in edges:
        count += 1
        if count % 100 == 0:
            print(count)  # progress indicator
        path = [n1] + edge['path'] + [n2]
        pa = np.array(path)
        mlab.plot3d(pa[::sub, 2], pa[::sub, 1], pa[::sub, 0],
                    color=(0, 1, 0), tube_radius=0.75)
    mlab.view(-125, 54, 'auto', 'auto')
    mlab.roll(-175)
    mlab.title(title, height=0.1)
    mlab.show()
# Keep only one connected set of cells of the smoothed surface.
# NOTE(review): extraction_mode=4 appears to select the largest region --
# confirm against the VTK PolyDataConnectivityFilter documentation.
connect_ = tvtk.PolyDataConnectivityFilter(extraction_mode=4)
connect = mlab.pipeline.user_defined(smooth, filter=connect_)

# Compute normals for shading the surface
compute_normals = mlab.pipeline.poly_data_normals(connect)
compute_normals.filter.feature_angle = 80
surf = mlab.pipeline.surface(compute_normals, color=(0.9, 0.72, 0.62))

# ----------------------------------------------------------------------
# Display a cut plane of the raw data
ipw = mlab.pipeline.image_plane_widget(src, colormap='bone',
                                       plane_orientation='z_axes',
                                       slice_index=55)

mlab.view(-165, 32, 350, [143, 133, 73])
mlab.roll(180)
fig.scene.disable_render = False

# ----------------------------------------------------------------------
# To make the link between the Mayavi pipeline and the much more
# complex VTK pipeline, we display both:
mlab.show_pipeline(rich_view=False)
from enthought.tvtk.pipeline.browser import PipelineBrowser
browser = PipelineBrowser(fig.scene)
browser.show()

mlab.show()
# Create the points
src = mlab.pipeline.scalar_scatter(x, y, z, s)

# Connect them
src.mlab_source.dataset.lines = connections

# The stripper filter cleans up connected lines
lines = mlab.pipeline.stripper(src)

# Finally, display the set of lines: a semi-transparent tube around each
# polyline, colored by the scalar value.
mlab.pipeline.surface(
    mlab.pipeline.tube(lines, tube_sides=7, tube_radius=0.1),
    opacity=.4, colormap='Accent')

# And choose a nice view
mlab.view(33.6, 106, 5.5, [0, 0, .05])
mlab.roll(125)
mlab.show()

# --- next example: plotting many lines ---------------------------------
from enthought.mayavi import mlab
import numpy as np

mlab.clf()

# Number of lines
n_lines = 200
# Number of points per line
n_points = 100

# Create Example Coordinates
    # NOTE(review): this chunk begins mid-expression -- the opening of the
    # call (presumably `connections.append(np.vstack(`) lies before this
    # view; the indentation below is a best-guess reconstruction.
    [np.arange(index, index + N - 1.5),
     np.arange(index+1, index + N - .5)]
    ).T)
    index += N

# Now collapse all positions, scalars and connections in big arrays
x = np.hstack(x)
y = np.hstack(y)
z = np.hstack(z)
s = np.hstack(s)
connections = np.vstack(connections)

# Create the points
src = mlab.pipeline.scalar_scatter(x, y, z, s)

# Connect them
src.mlab_source.dataset.lines = connections

# The stripper filter cleans up connected lines
lines = mlab.pipeline.stripper(src)

# Finally, display the set of lines
mlab.pipeline.surface(lines, colormap='Accent', line_width=1, opacity=.4)

# And choose a nice view
mlab.view(33.6, 106, 5.5, [0, 0, .05])
mlab.roll(125)
mlab.show()
s = m.surf(n_e_arr[1], n_e_arr[2], n_mu_q_arr[0, :, :]) m.axes( s, color=(0.7, 0.7, 0.7), extent=(-1, 1, 0, 1, 0, 1), ranges=(-0.21, 0.21, 0.1, 20, 0, max_mu_q), xlabel="x", ylabel="Lr", zlabel="Force", ) m.view(-60.0, 70.0, focalpoint=[0.0, 0.45, 0.45]) # Store the information view = m.view() roll = m.roll() print "view", view print "roll", roll print n_mu_q_arr.shape[2] ms = s.mlab_source for i in range(1, n_mu_q_arr.shape[0]): ms.scalars = n_mu_q_arr[i, :, :] fname = "x%02d.jpg" % i print "saving", fname m.savefig(fname) sleep(0.1) # m.surf( n_e_arr[0], n_e_arr[1], n_mu_q_arr + n_std_q_arr ) # m.surf( n_e_arr[0], n_e_arr[1], n_mu_q_arr - n_std_q_arr ) m.show()
                                            # NOTE(review): this chunk begins
                                            # mid-call; the opening
                                            # `scalar_cut_plane(...` lies
                                            # before this view.
                                            vmax=2600)
cut_plane2.implicit_plane.origin = (136, 111.5, 82)
cut_plane2.implicit_plane.widget.enabled = False

# Extract two views of the outside surface. We need to define VOIs in
# order to leave out a cut in the head.
voi2 = mlab.pipeline.extract_grid(src)
voi2.set(y_min=112)
outer = mlab.pipeline.iso_surface(voi2, contours=[ 1776, ],
                                  color=(0.8, 0.7, 0.6))

voi3 = mlab.pipeline.extract_grid(src)
voi3.set(y_max=112, z_max=53)
outer3 = mlab.pipeline.iso_surface(voi3, contours=[ 1776, ],
                                   color=(0.8, 0.7, 0.6))

mlab.view(-125, 54, 326, (145.5, 138, 66.5))
mlab.roll(-175)

mlab.show()

# Remove the data directory downloaded for this example.
import shutil
shutil.rmtree('mri_data')
cut_plane.implicit_plane.widget.enabled = False

# Second cut plane, along z, showing the thresholded data in grayscale.
cut_plane2 = mlab.pipeline.scalar_cut_plane(thr,
                                            plane_orientation='z_axes',
                                            colormap='black-white',
                                            vmin=1400,
                                            vmax=2600)
cut_plane2.implicit_plane.origin = (136, 111.5, 82)
cut_plane2.implicit_plane.widget.enabled = False

# Extract two views of the outside surface. We need to define VOIs in
# order to leave out a cut in the head.
voi2 = mlab.pipeline.extract_grid(src)
voi2.set(y_min=112)
outer = mlab.pipeline.iso_surface(voi2, contours=[1776, ],
                                  color=(0.8, 0.7, 0.6))

voi3 = mlab.pipeline.extract_grid(src)
voi3.set(y_max=112, z_max=53)
outer3 = mlab.pipeline.iso_surface(voi3, contours=[1776, ],
                                   color=(0.8, 0.7, 0.6))

mlab.view(-125, 54, 326, (145.5, 138, 66.5))
mlab.roll(-175)
mlab.show()

# Remove the data directory downloaded for this example.
import shutil
shutil.rmtree('mri_data')
#filter.filter.source = extract_vector_norm.outputs[0] #filter.filter.source = surface.outputs[0] print "vectors" vectors = mlab.pipeline.vectors(filter, mode='2darrow') print "polishing" vectors.glyph.color_mode = 'no_coloring' vectors.actor.property.color = (0, 0, 0) vectors.glyph.glyph.scale_factor = 0.25 vectors.glyph.glyph_source.glyph_source.scale = 1.033 vectors.glyph.glyph_source.glyph_source.center = array([0.5, 0, 0]) print "done" mlab.view(0.0, 0.0, 19.6, array([5, 5, 0])) mlab.roll(0) #mlab.show() for i in range(max_frame_number + 1): print "doing:", i vtk_file_reader.timestep = i vectors.actor.property.color = (0, 0, 0) vectors.glyph.glyph.scale_factor = 0.25 vectors.glyph.glyph_source.glyph_source.scale = 1.033 * 0.5 vectors.glyph.glyph_source.glyph_source.center = array([0.5, 0, 0]) mlab.savefig("output/frame_vec%04d.png" % i) print "Files saved to output/*" print """Create the video using: ffmpeg -i output/frame_vec%04d.png -r 15 -vcodec copy output/output.avi
#filter.filter.source = extract_vector_norm.outputs[0] #filter.filter.source = surface.outputs[0] print "vectors" vectors = mlab.pipeline.vectors(filter, mode='2darrow') print "polishing" vectors.glyph.color_mode = 'no_coloring' vectors.actor.property.color = (0, 0, 0) vectors.glyph.glyph.scale_factor = 0.25 vectors.glyph.glyph_source.glyph_source.scale = 1.033 vectors.glyph.glyph_source.glyph_source.center = array([0.5, 0, 0]) print "done" mlab.view(0.0, 0.0, 19.6, array([5, 5, 0])) mlab.roll(0) #mlab.show() for i in range(max_frame_number+1): print "doing:", i vtk_file_reader.timestep = i vectors.actor.property.color = (0, 0, 0) vectors.glyph.glyph.scale_factor = 0.25 vectors.glyph.glyph_source.glyph_source.scale = 1.033*0.5 vectors.glyph.glyph_source.glyph_source.center = array([0.5, 0, 0]) mlab.savefig("output/frame_vec%04d.png" % i) print "Files saved to output/*" print """Create the video using: ffmpeg -i output/frame_vec%04d.png -r 15 -vcodec copy output/output.avi
vtk_file_reader = engine.open(u'output/frame_scal0000.vtk') warp_scalar = WarpScalar() engine.add_filter(warp_scalar, vtk_file_reader) surface = Surface() engine.add_filter(surface, warp_scalar) warp_scalar.filter.normal = array([ 0., 0., 1.]) warp_scalar.filter.scale_factor = 10.0 module_manager = engine.scenes[0].children[0].children[0].children[0] module_manager.scalar_lut_manager.use_default_range = False module_manager.scalar_lut_manager.data_range = array([-1.5, 1.5]) module_manager.scalar_lut_manager.show_scalar_bar = True mlab.view(-122, 53, 26, array([7.5, 2.5, -0.11])) mlab.roll(40) print " done." for i in range(max_frame_number+1): print "doing:", i vtk_file_reader.timestep = i mlab.savefig("output/frame_scal%04d.png" % i) print "Files saved to output/*" print """Create the video using: ffmpeg -i output/frame_scal%04d.png -r 15 -vcodec copy output/output.avi ffmpeg2theora output/output.avi -o output.ogv To produce a FLV video, use: ffmpeg -b 3600k -i output/frame_scal%04d.png -r 15 video.flv """