def SedimentColour(self, z):
    """Update the sediment-surface texture so each element's colour encodes
    its sediment depth.

    The green channel is set proportional to ``z`` normalised by its
    maximum, written out as ``my.png`` and applied to ``self.a``'s actor
    with planar texture-coordinate generation.

    Parameters
    ----------
    z : ndarray
        Per-element sediment depth. The texture buffer is hard-coded to
        512x512, so z is presumably 512x512 — TODO confirm with callers.
    """
    # ============== BUG ============== BUG ============== BUG ============== BUG ============== BUG ==============
    # There is a bug which occurs randomly, in those cases the texture is transposed. It is unclear what causes the
    # bug and what can be done to remove it.
    # NOTE(review): PIL treats numpy axis 0 as image rows, so the
    # array -> image -> plane-tcoord chain contains an implicit transpose;
    # the random flip may be related — TODO investigate.
    # ============== BUG ============== BUG ============== BUG ============== BUG ============== BUG ==============
    from PIL import Image
    b = np.zeros((512, 512, 3))
    # Green channel scales linearly with normalised depth (max -> 255).
    b[:, :, 1] = 255 * z / np.max(z)
    img = Image.fromarray(b.astype('uint8'), 'RGB')
    #img.show()
    img = img.rotate( 90 )  # The rotation is necessary for the image to align with the surface properly.
    #img = img.transpose(Image.TRANSPOSE)  # The rotation is necessary for the image to align with the surface properly.
    img.save('my.png')
    bmp1 = tvtk.PNGReader()
    #bmp1 = tvtk.JPEGReader()
    bmp1.file_name = "my.png"
    # interpolate=0: nearest-neighbour sampling keeps element colours crisp.
    my_texture = tvtk.Texture(input_connection=bmp1.output_port, interpolate=0)
    # If the scalar_visibility is not False the colour of the texture will depend on the height of the surface. When
    # the value is False the appearance of the texture does not depend on the height of the surface, THIS IS CRUCIAL.
    self.a.actor.mapper.scalar_visibility = False
    self.a.actor.enable_texture = True
    self.a.actor.tcoord_generator_mode = 'plane'
    self.a.actor.actor.texture = my_texture
def plot_Earth_Mayavi( earthTexture='MAPLEAF/IO/blue_marble_spherical_splitFlipped.jpg'):
    """Open a new Mayavi figure and render a texture-mapped Earth sphere."""
    from mayavi import mlab
    from tvtk.api import tvtk  # python wrappers for the C++ vtk ecosystem

    # Fresh scene/window to draw into.
    scene_fig = mlab.figure(size=(600, 600))

    # Read the texture image and wrap it in a VTK texture.
    # interpolate=1 smooths the raster when zoomed in.
    reader = tvtk.JPEGReader(file_name=getAbsoluteFilePath(earthTexture))
    earth_texture = tvtk.Texture(input_connection=reader.output_port,
                                 interpolate=1)

    # Sphere with Earth's mean radius (metres) and 180-sample resolution
    # in both angular directions.
    earth_radius = 6371007.1809
    resolution = 180
    source = tvtk.TexturedSphereSource(radius=earth_radius,
                                       theta_resolution=resolution,
                                       phi_resolution=resolution)

    # Wire up mapper -> actor -> scene.
    mapper = tvtk.PolyDataMapper(input_connection=source.output_port)
    actor = tvtk.Actor(mapper=mapper, texture=earth_texture)
    scene_fig.scene.add_actor(actor)
def _render_mesh(self, mesh_type="surface", ambient_light=0.0,
                 specular_light=0.0, alpha=1.0):
    """Build a textured VTK actor for this mesh and add it to the figure.

    Parameters
    ----------
    mesh_type : str
        VTK representation ('surface', 'wireframe', 'points').
    ambient_light : float
        Ambient lighting coefficient for the actor's property.
    specular_light : float
        Specular lighting coefficient for the actor's property.
    alpha : float
        Actor opacity in [0, 1].
    """
    from tvtk.api import tvtk

    # Mesh geometry + per-point texture coordinates.
    pd = tvtk.PolyData()
    pd.points = self.points
    pd.polys = self.trilist
    pd.point_data.t_coords = self.tcoords_per_point
    mapper = tvtk.PolyDataMapper()
    mapper.set_input_data(pd)
    p = tvtk.Property(
        representation=mesh_type,
        opacity=alpha,
        ambient=ambient_light,
        specular=specular_light,
    )
    actor = tvtk.Actor(mapper=mapper, property=p)

    # Get the pixels from our image class which are [0, 1] and scale
    # back to valid pixels. Then convert to tvtk ImageData.
    texture = self.texture.pixels_with_channels_at_back(out_dtype=np.uint8)
    if self.texture.n_channels == 1:
        # Grayscale: replicate the single channel into three RGB channels.
        texture = np.stack([texture] * 3, axis=-1)
    # Row flip: presumably to match VTK's bottom-left image origin —
    # TODO confirm against the tcoords convention.
    image_data = np.flipud(texture).ravel()
    image_data = image_data.reshape([-1, 3])
    image = tvtk.ImageData()
    image.point_data.scalars = image_data
    image.dimensions = self.texture.width, self.texture.height, 1
    texture = tvtk.Texture()
    texture.set_input_data(image)
    actor.texture = texture
    self.figure.scene.add_actors(actor)
    # Keep a handle so the actor can be removed/updated later.
    self._actors.append(actor)
def update_plot(self, terrain, track):
    """Populate the embedded Mayavi scene: textured terrain mesh, the
    GPS track, and NORTH/START 3-D labels.

    Parameters
    ----------
    terrain : mapping
        Provides 2-D coordinate arrays under 'x', 'y', 'z' (assumed
        grids — TODO confirm with callers).
    track : mapping
        Provides 'x', 'y', 'z' sequences plus 'color', 'line_radius'
        and 'textsize' display settings.
    """
    # This function is called when the view is opened. We don't
    # populate the scene when the view is not yet open, as some
    # VTK features require a GLContext.

    # We can do normal mlab calls on the embedded scene.
    # self.scene.mlab.test_points3d()

    # Here's were I embedded my code
    self.scene.mlab.clf()

    # Plot the elevation mesh
    elevation_mesh = self.scene.mlab.mesh(terrain['x'],
                                          terrain['y'],
                                          terrain['z'])

    # Read and apply texture
    bmp = tvtk.PNGReader(file_name=bombo.TEXTURE_FILE)
    texture = tvtk.Texture(input_connection=bmp.output_port, interpolate=1)
    # scalar_visibility must be off so texture colours do not depend on
    # the surface height.
    elevation_mesh.actor.actor.mapper.scalar_visibility = False
    elevation_mesh.actor.enable_texture = True
    elevation_mesh.actor.tcoord_generator_mode = 'plane'
    elevation_mesh.actor.actor.texture = texture

    # Display path nodes: a single point gets a sphere glyph, a longer
    # track is drawn as a tube.
    if len(track['x']) == 1:
        track_line = self.scene.mlab.points3d(
            track['x'], track['y'], track['z'],
            color=track['color'],
            mode='sphere',
            scale_factor=track['line_radius'] * 10)
    else:
        track_line = self.scene.mlab.plot3d(
            track['x'], track['y'], track['z'],
            color=track['color'],
            line_width=10.0,
            tube_radius=track['line_radius'])

    # Display north text
    north_label = self.scene.mlab.text3d(
        (terrain['x'][0][0] + terrain['x'][-1][0]) / 2,
        terrain['y'][0][0],
        np.max(terrain['z']),
        "NORTH",
        scale=(track['textsize'], track['textsize'], track['textsize']))

    # Displaying start test
    if len(track['x']) > 1:
        start_label = self.scene.mlab.text3d(
            track['x'][0],
            track['y'][0],
            track['z'][0] * 1.5,
            "START",
            scale=(track['textsize'], track['textsize'], track['textsize']))
def image_to_vtk_texture(img):
    """Convert an (H, W, 3) RGB image array into a ``tvtk.Texture``.

    Parameters
    ----------
    img : ndarray, shape (H, W, 3)
        RGB image. Rows are flipped so the texture is not upside down
        (VTK's image origin is bottom-left).

    Returns
    -------
    tvtk.Texture
        Texture whose input is the converted ImageData.
    """
    imgdata = tvtk.ImageData()
    # Flip vertically and flatten to an (H*W, 3) uint8 scalar array.
    t = img[::-1].reshape(-1, 3).astype(np.uint8)
    imgdata.point_data.scalars = t
    # BUGFIX: extent must agree with dimensions — the x range spans the
    # image width (img.shape[1]) and the y range the height
    # (img.shape[0]). These were previously swapped, which corrupts
    # non-square textures.
    imgdata.extent = (0, img.shape[1] - 1, 0, img.shape[0] - 1, 0, 0)
    imgdata.dimensions = (img.shape[1], img.shape[0], 1)
    vtk_texture = tvtk.Texture()
    configure_input_data(vtk_texture, imgdata)
    return vtk_texture
def setUp(self):
    """Create a temporary save path and a checkerboard texture fixture."""
    # Temporary directory holding figures saved during the test.
    self.temp_dir = tempfile.mkdtemp()
    self.filename = os.path.join(self.temp_dir, "saved_figure.png")
    # Guarantee the temporary directory is removed after the test.
    self.addCleanup(self.remove_tempdir)

    # Texture image: a black/white checkerboard pattern.
    checker_path = os.path.join(MY_DIR, "images", "checker.jpg")
    reader = tvtk.JPEGReader(file_name=checker_path)
    self.texture = tvtk.Texture(input_connection=reader.output_port,
                                interpolate=1)
def setup_pipeline(self):
    """Override this method so that it *creates* its tvtk pipeline.

    This method is invoked when the object is initialized via
    `__init__`.  Note that at the time this method is called, the tvtk
    data pipeline will *not* yet be setup.  So upstream data will
    not be available.  The idea is that you simply create the basic
    objects and setup those parts of the pipeline not dependent on
    upstream sources and filters.
    """
    # Independent pipeline objects; order of creation does not matter.
    self.actor = tvtk.Actor()
    self.property = self.actor.property
    self.mapper = tvtk.PolyDataMapper(use_lookup_table_scalar_range=1)
    self.texture = tvtk.Texture()
def createBed(self):
    """Draw the textured 210x210 printer-bed plane just above z = 0."""
    # Corner points of the bed quad (slightly above z=0 to avoid
    # z-fighting with anything drawn at exactly zero).
    p1 = (0, 210, 0.1)
    p2 = (210, 210, 0.1)
    p3 = (0, 0, 0.1)
    p4 = (210, 0, 0.1)
    xs = [[p1[0], p2[0]], [p3[0], p4[0]]]
    ys = [[p1[1], p2[1]], [p3[1], p4[1]]]
    zs = [[p1[2], p2[2]], [p3[2], p4[2]]]
    bed = mlab.mesh(xs, ys, zs, color=self.bedcolor)

    # Drape the bed texture over the plane; repeat=0 disables tiling.
    reader = tvtk.JPEGReader(file_name=sys.path[0] + "/bed_texture.jpg")
    bed_texture = tvtk.Texture(input_connection=reader.output_port,
                               interpolate=1, repeat=0)
    bed.actor.actor.texture = bed_texture
    bed.actor.tcoord_generator_mode = 'plane'
def draw(self, figure):
    """Draw this body as a textured sphere in *figure*, downloading the
    NASA blue-marble texture on first use and caching it under
    /hermes_temp.

    Parameters
    ----------
    figure : mayavi figure-like object
        Scene container the sphere actor is added to.
    """
    from tvtk.api import tvtk
    import tempfile  # NOTE(review): unused import — candidate for removal
    import urllib.request
    from pathlib import Path

    local_filename = Path("/hermes_temp/blue_marble.jpg")
    if not local_filename.is_file():
        # First run: fetch the texture from NASA and cache it locally.
        local_filename.parent.mkdir(parents=True, exist_ok=True)
        print("Downloading Earth")
        from tqdm import tqdm
        dbar = tqdm(leave=False)
        # NOTE(review): dbar is never closed after the download finishes
        # — confirm whether dbar.close() should be added.

        def download_bar(count, block_size, total_size):
            # urlretrieve reporthook: advance the progress bar per block.
            dbar.total = total_size
            dbar.update(block_size)

        # urlretrieve returns the (string) path of the downloaded file,
        # rebinding local_filename to a str.
        local_filename, headers = urllib.request.urlretrieve(
            "https://eoimages.gsfc.nasa.gov/images/imagerecords/73000/73909/world.topo.bathy.200412.3x5400x2700.jpg",
            "/hermes_temp/blue_marble.jpg",
            reporthook=download_bar)
    else:
        # Cached file: the JPEG reader expects a plain string path.
        local_filename = str(local_filename)

    img = tvtk.JPEGReader()
    img.file_name = local_filename
    texture = tvtk.Texture(input_connection=img.output_port, interpolate=1)

    # use a TexturedSphereSource, a.k.a. getting our hands dirty
    Nrad = 180

    # create the sphere source with a given radius and angular resolution
    sphere = tvtk.TexturedSphereSource(radius=self.poli_body.R_mean.to(
        visualisation.SCALE_UNIT).value,
                                       theta_resolution=Nrad,
                                       phi_resolution=Nrad)
    # assemble rest of the pipeline, assign texture
    sphere_mapper = tvtk.PolyDataMapper(
        input_connection=sphere.output_port)
    self.sphere_actor = tvtk.Actor(mapper=sphere_mapper, texture=texture)
    figure.scene.add_actor(self.sphere_actor)
def _render_mesh(self):
    """Build a textured VTK actor from this mesh and add it to the figure.

    Uses the mesh's points/trilist/per-point texture coordinates and the
    attached texture image (converted to uint8 ImageData).
    """
    from tvtk.api import tvtk

    pd = tvtk.PolyData()
    pd.points = self.points
    pd.polys = self.trilist
    pd.point_data.t_coords = self.tcoords_per_point
    # VTK 6+ pipeline: the `input=` trait was removed — use
    # set_input_data, matching the other _render_mesh implementations
    # in this file.
    mapper = tvtk.PolyDataMapper()
    mapper.set_input_data(pd)
    actor = tvtk.Actor(mapper=mapper)

    # Get the pixels from our image class which are [0, 1] and scale
    # back to valid pixels. Then convert to tvtk ImageData.
    image_data = np.flipud(np.array(self.texture.as_PILImage())).ravel()
    image_data = image_data.reshape([-1, 3])
    image = tvtk.ImageData()
    image.point_data.scalars = image_data
    image.dimensions = self.texture.width, self.texture.height, 1
    texture = tvtk.Texture()
    texture.set_input_data(image)
    actor.texture = texture
    self.figure.scene.add_actors(actor)
def _render_mesh(self):
    """Build a textured VTK actor from this mesh and add it to the figure.

    Texture pixels are assumed to be floats in [0, 1]; they are rescaled
    to uint8 before conversion to tvtk ImageData.
    """
    from tvtk.api import tvtk

    pd = tvtk.PolyData()
    pd.points = self.points
    pd.polys = self.trilist
    pd.point_data.t_coords = self.tcoords_per_point
    # VTK 6+ pipeline: the `input=` trait was removed — use
    # set_input_data, matching the other _render_mesh implementations
    # in this file.
    mapper = tvtk.PolyDataMapper()
    mapper.set_input_data(pd)
    actor = tvtk.Actor(mapper=mapper)

    # Get the pixels from our image class which are [0, 1] and scale
    # back to valid pixels. Then convert to tvtk ImageData.
    image_data = np.flipud(self.texture.pixels * 255).flatten().reshape(
        [-1, 3]).astype(np.uint8)
    image = tvtk.ImageData()
    image.point_data.scalars = image_data
    image.dimensions = self.texture.shape[1], self.texture.shape[0], 1
    texture = tvtk.Texture()
    texture.set_input_data(image)
    actor.texture = texture
    self.figure.scene.add_actors(actor)
def showin3d(self, imagename='defaultconverted'):
    """Drape the rendered JPEG texture over the terrain mesh and show it.

    Parameters
    ----------
    imagename : str
        Basename of the image under ``simulationdata/visualize/``; the
        'mayavi.jpeg' suffix is appended.
    """
    # pdb.set_trace()
    bmp1 = tvtk.JPEGReader()
    bmp1.file_name = "simulationdata/visualize/" + imagename + "mayavi.jpeg"  # any jpeg file
    # Modern VTK pipeline: connect the reader via input_connection rather
    # than the removed VTK-5 set_input()/get_output() API (consistent
    # with the other texture code in this file). interpolate=0 keeps
    # pixels crisp.
    my_texture = tvtk.Texture(input_connection=bmp1.output_port,
                              interpolate=0)

    # mlab.figure(size=(640, 800), bgcolor=(0.16, 0.28, 0.46))
    # a = np.load('simulationdata/corrected_terrain_mt.npy')
    x = self.X
    y = self.Y
    # Flip the y axis so the texture orientation matches the terrain.
    y = y.max() - y
    z = scale(self.Z, 1000, 1)
    surf = mlab.mesh(x, y, z, color=(1, 1, 1))
    surf.actor.enable_texture = True
    surf.actor.tcoord_generator_mode = 'plane'
    surf.actor.actor.texture = my_texture
    mlab.show()
def draw(self, figure):
    """Add a blue-marble textured sphere for this body to *figure*."""
    from tvtk.api import tvtk

    # Load the blue-marble texture from disk.
    reader = tvtk.JPEGReader()
    reader.file_name = "D:/git/thesis/mmWaveISL/mmWaveISL/blue_marble.jpg"
    earth_texture = tvtk.Texture(input_connection=reader.output_port,
                                 interpolate=1)

    # Textured sphere sized to the body's mean radius, in km.
    resolution = 180
    source = tvtk.TexturedSphereSource(radius=self.R_mean.to(u.km).value,
                                       theta_resolution=resolution,
                                       phi_resolution=resolution)

    # Mapper -> actor -> scene.
    mapper = tvtk.PolyDataMapper(input_connection=source.output_port)
    self.sphere_actor = tvtk.Actor(mapper=mapper, texture=earth_texture)
    figure.scene.add_actor(self.sphere_actor)
def manual_sphere(image_file):
    """Texture-map *image_file* onto a hand-built unit-sphere mesh.

    Caveat 1: the input image may need flipping along its first axis to
    get the correct chirality of the mapping.
    Caveat 2: the spherical tcoord generator's prevent_seam flag is
    disabled so the full 360 degrees of the image is used.
    """
    # Read kept from the original for identical side effects (result
    # itself is unused; a pre-flipped file workflow was removed).
    img_array = plt.imread(image_file)  # shape (N, M, 3)

    # Sphere parametrization: unit radius, 180 samples in each angle.
    radius = 1
    n_samples = 180
    azimuth = np.linspace(0, 2 * np.pi, n_samples)      # shape (n_samples,)
    polar = np.linspace(0, np.pi, n_samples)            # shape (n_samples,)
    az_grid, pol_grid = np.meshgrid(azimuth, polar)     # (n_samples, n_samples)

    # Cartesian coordinates of the mesh points.
    x = radius * np.sin(pol_grid) * np.cos(az_grid)
    y = radius * np.sin(pol_grid) * np.sin(az_grid)
    z = radius * np.cos(pol_grid)

    mlab.figure(size=(600, 600))
    mesh = mlab.mesh(x, y, z)
    # Texture colour must not be modulated by the surface scalars.
    mesh.actor.actor.mapper.scalar_visibility = False
    mesh.actor.enable_texture = True  # probably redundant: texture set below

    # Load the (possibly flipped) image for texturing.
    reader = tvtk.PNGReader(file_name=image_file)
    sphere_texture = tvtk.Texture(input_connection=reader.output_port,
                                  interpolate=0, repeat=0)
    mesh.actor.actor.texture = sphere_texture

    # Map points to pixels via a spherical texture-coordinate generator.
    mesh.actor.tcoord_generator_mode = 'sphere'
    tcoord_gen = mesh.actor.tcoord_generator
    # With prevent_seam=1 (the default) only half the image maps half the
    # sphere; 0 uses the full 360 degrees (may show a seam, no fake data).
    tcoord_gen.prevent_seam = 0
def auto_sphere(image_file):
    """Render *image_file* wrapped around a unit sphere using
    TexturedSphereSource (the 'easy' route)."""
    # New figure window (and scene).
    scene_fig = mlab.figure(size=(600, 600))

    # Load and map the texture; interpolate=1 gives a less rasterized
    # look when zoomed in.
    reader = tvtk.PNGReader()
    reader.file_name = image_file
    sphere_texture = tvtk.Texture(input_connection=reader.output_port,
                                  interpolate=1)

    # Unit sphere, 180 angular samples in theta and phi.
    radius = 1
    resolution = 180
    source = tvtk.TexturedSphereSource(radius=radius,
                                       theta_resolution=resolution,
                                       phi_resolution=resolution)

    # Mapper -> actor -> scene.
    mapper = tvtk.PolyDataMapper(input_connection=source.output_port)
    actor = tvtk.Actor(mapper=mapper, texture=sphere_texture)
    scene_fig.scene.add_actor(actor)
def draw_bilboard(fig, texture_name, pos=(0, 0, 0), scale=1.0):
    """Draw a textured plane ("billboard") into *fig*.

    Parameters
    ----------
    fig : mayavi figure-like object
        Scene container the plane actor is added to.
    texture_name : str
        Path to a PNG texture file.
    pos : tuple of 3 floats
        Center of the plane.
    scale : float
        Currently unused (the resize step is commented out); kept for
        backward compatibility.
    """
    plane = tvtk.PlaneSource(center=pos)
    # pdb.set_trace()
    # plane = resize(plane, scale)
    reader = tvtk.PNGReader()
    reader.set_data_scalar_type_to_unsigned_char()
    reader.set(file_name=texture_name)
    # Modern VTK pipeline: input_connection replaces the removed VTK-5
    # set_input(get_output()) calls. interpolate=0 keeps pixels crisp.
    plane_texture = tvtk.Texture(input_connection=reader.output_port,
                                 interpolate=0)
    # Renamed from `map` (shadowed the builtin) and connected via ports.
    tcoord_map = tvtk.TextureMapToPlane(input_connection=plane.output_port)
    plane_mapper = tvtk.PolyDataMapper(
        input_connection=tcoord_map.output_port)
    # (An unused tvtk.Property was previously created here; removed.)
    plane_actor = tvtk.Actor(mapper=plane_mapper, texture=plane_texture)
    fig.scene.add_actor(plane_actor)
def createSphere(fig, image_file):
    """Add an Earth-sized textured sphere to *fig*.

    Returns the figure and the sphere actor.
    """
    # Load and map the texture; interpolate=1 smooths the raster when
    # zoomed in.
    reader = tvtk.JPEGReader()
    reader.file_name = image_file
    sphere_texture = tvtk.Texture(input_connection=reader.output_port,
                                  interpolate=1)

    # Earth radius in metres, 360 angular samples in theta and phi.
    earth_radius_m = 6371 * 1000
    resolution = 360
    source = tvtk.TexturedSphereSource(radius=earth_radius_m,
                                       theta_resolution=resolution,
                                       phi_resolution=resolution)

    # Mapper -> actor -> scene.
    mapper = tvtk.PolyDataMapper(input_connection=source.output_port)
    actor = tvtk.Actor(mapper=mapper, texture=sphere_texture)
    fig.scene.add_actor(actor)
    # Rotate the globe half a turn about z (texture alignment).
    actor.rotate_z(180)
    return fig, actor
def _render_mesh(self):
    """Build a textured VTK actor from this mesh and add it to the figure."""
    from tvtk.api import tvtk

    # Mesh geometry + per-point texture coordinates.
    poly = tvtk.PolyData()
    poly.points = self.points
    poly.polys = self.trilist
    poly.point_data.t_coords = self.tcoords_per_point

    poly_mapper = tvtk.PolyDataMapper()
    poly_mapper.set_input_data(poly)
    mesh_actor = tvtk.Actor(mapper=poly_mapper)

    # Texture pixels are [0, 1]; rescale to uint8, then convert to
    # tvtk ImageData.
    pixels = self.texture.pixels_with_channels_at_back(out_dtype=np.uint8)
    if self.texture.n_channels == 1:
        # Grayscale image: replicate the channel into RGB.
        pixels = np.stack([pixels] * 3, axis=-1)
    flat_pixels = np.flipud(pixels).ravel().reshape([-1, 3])

    vtk_image = tvtk.ImageData()
    vtk_image.point_data.scalars = flat_pixels
    vtk_image.dimensions = self.texture.width, self.texture.height, 1

    vtk_texture = tvtk.Texture()
    vtk_texture.set_input_data(vtk_image)
    mesh_actor.texture = vtk_texture

    self.figure.scene.add_actors(mesh_actor)
# create a 2d view of the array ary_2d = ary[:] ary_2d.shape = sz[0] * sz[1], sz[2] img.point_data.scalars = ary_2d else: raise ValueError("ary must be 3 dimensional.") return img sz = (256, 256, 3) array_3d = zeros(sz, uint8) img = image_from_array(array_3d) t = tvtk.Texture(interpolate=1) configure_input_data(t, img) a.texture = t # Renderwindow stuff and add actor. rw = tvtk.RenderWindow(size=(600, 600)) ren = tvtk.Renderer(background=(0.1, 0.2, 0.4)) rw.add_renderer(ren) rwi = tvtk.RenderWindowInteractor(render_window=rw) ren.add_actor(a) rwi.initialize() # create a little wave to slide across the image. wave = 1 / sqrt(2 * pi) * exp(-arange(-2, 2, .05)**2 / 2) * 255 # have to use += here because = doesn't respect broadcasting correctly. array_3d[:len(wave)] += wave.astype(uint8)[:, None, None]
from osgeo import gdal
from tvtk.api import tvtk
from mayavi import mlab
import Image

# Rotate the orthophoto so it lines up with the surface's planar
# texture coordinates, then cache the rotated copy for the JPEG reader.
im1 = Image.open("../texture.jpg")
im2 = im1.rotate(90)
im2.save("../tmp/ortofoto90.jpg")

bmp1 = tvtk.JPEGReader(file_name="../tmp/ortofoto90.jpg")
# Build the texture directly from the reader. (The previous code first
# created a throwaway tvtk.Texture() and set its interpolate flag, then
# immediately rebound the name to a new Texture — dead code removed.)
my_texture = tvtk.Texture(input_connection=bmp1.output_port, interpolate=0)

# Load the PLY surface and drape the texture over it.
surf = mlab.pipeline.surface(mlab.pipeline.open("PLY/target.ply"))
surf.actor.enable_texture = True
surf.actor.tcoord_generator_mode = 'plane'
surf.actor.actor.texture = my_texture
mlab.show()
def maven_orbit_image(time, camera_pos=[1, 0, 0], camera_up=[0, 0, 1],
                      extent=3, parallel_projection=True,
                      view_from_orbit_normal=False,
                      view_from_periapsis=False,
                      show_maven=False,
                      show_orbit=True,
                      label_poles=None,
                      show=True,
                      transparent_background=False,
                      background_color=(0, 0, 0)):
    """Creates an image of Mars and the MAVEN orbit at a specified time.

    Parameters
    ----------
    time : str
        Time to display, in a string format interpretable by
        spiceypy.str2et.
    camera_pos : length 3 iterable
        Position of camera in MSO coordinates.
    camera_up : length 3 iterable
        Vector defining the image vertical.
    extent : float
        Half-width of image in Mars radii.
    parallel_projection : bool
        Whether to display an isomorphic image from the camera position.
        If False, goofy things happen. Defaults to True.
    view_from_orbit_normal : bool
        Override camera_pos with a camera position along MAVEN's orbit
        normal. Defaults to False.
    view_from_periapsis : bool
        Override camera_pos with a camera position directly above MAVEN's
        periapsis. Defaults to False.
    show_maven : bool
        Whether to draw a circle showing the position of MAVEN at the
        specified time. Defaults to False.
    show_orbit : bool
        Whether to draw the MAVEN orbit. Defaults to True.
    label_poles : bool
        Whether to draw an 'N' and 'S' above the visible poles of Mars.
    show : bool
        Whether to show the image when called, or suppress display.
        Defaults to True.
    transparent_background : bool
        If True, the image background is transparent, otherwise it is
        set to background_color. Defaults to False.
    background_color : RGB1 tuple
        Background color to use if transparent_background=False.
        Specified as an RGB tuple with values between 0 and 1.

    Returns
    -------
    rgb_array : 1000x1000x3 numpy array of image RGB values
        Image RGB values.
    return_coords : dict
        Description of the image coordinate system useful for plotting
        on top of output image.

    Notes
    -----
    Call maven_iuvs.load_iuvs_spice() before calling this function to
    ensure kernels are loaded.
    """
    # NOTE(review): camera_pos/camera_up are mutable default arguments;
    # they are rebound (not mutated) below, but None sentinels would be
    # safer — TODO consider.
    myet = spice.str2et(time)

    # disable mlab display (this is done by matplotlib later)
    mlab.options.offscreen = True

    # create a figure window (and scene)
    mlab_pix = 1000
    mfig = mlab.figure(size=(mlab_pix, mlab_pix),
                       bgcolor=background_color)

    # disable rendering as objects are added
    mfig.scene.disable_render = True

    #
    # Set up the planet surface
    #

    # load and map the Mars surface texture
    image_file = os.path.join(anc_dir, 'marssurface_2.jpg')
    img = tvtk.JPEGReader()
    img.file_name = image_file
    texture = tvtk.Texture(input_connection=img.output_port, interpolate=1)

    # attach the texture to a sphere
    mars_radius = 3395.
    sphere_radius = 1  # radius of planet is 1 rM
    sphere_resolution = 180  # 180 points on the sphere
    sphere = tvtk.TexturedSphereSource(radius=sphere_radius,
                                       theta_resolution=sphere_resolution,
                                       phi_resolution=sphere_resolution)
    sphere_mapper = tvtk.PolyDataMapper(input_connection=sphere.output_port)
    mars = tvtk.Actor(mapper=sphere_mapper, texture=texture)

    # adjust the reflection properties for a pretty image
    mars.property.ambient = 0.2  # so the nightside is slightly visible
    mars.property.specular = 0.15  # make it shinier near dayside

    # now apply the rotation matrix to the planet

    # tvtk only thinks about rotations with Euler angles, so we need
    # to use a SPICE routine to get these from the rotation matrix

    # to get from the surface to MSO coordinates we'd normally do
    # this:
    rmat = spice.pxform('IAU_MARS', 'MAVEN_MSO', myet)

    # but we need to use transpose because spice.m2eul assumes the matrix
    # defines a coordinate system rotation, the inverse of the matrix
    # to rotate vectors
    trmat = spice.pxform('MAVEN_MSO', 'IAU_MARS', myet)

    # now we can get the Euler angles
    rangles = np.rad2deg(spice.m2eul(trmat, 2, 1, 3))
    #                                       ^^^^^^^^
    #                                       2,1,3 because vtk performs
    #                                       rotations in the order
    #                                       z,x,y and SPICE wants these
    #                                       in REVERSE order
    mars.orientation = rangles[[1, 0, 2]]
    #                          ^^^^^^^
    #                          orientation must be specified as x,y,z
    #                          rotations in that order even though they
    #                          are applied in the order above

    # OK, that was hard, but now we're good!
    mfig.scene.add_actor(mars)

    #
    # make a lat/lon grid
    #
    line_x = []
    line_y = []
    line_z = []
    line_o = []
    line_t = np.linspace(0, 2*np.pi, 100)
    line_r = 1.0

    # meridians every 30 degrees; the appended [0] entries insert
    # zero-opacity breaks between the individual circles
    longrid = np.arange(0, 360, 30)
    for lon in longrid:
        line_x.append(line_r*np.cos(np.deg2rad(lon))*np.cos(line_t))
        line_x.append([0])
        line_y.append(line_r*np.sin(np.deg2rad(lon))*np.cos(line_t))
        line_y.append([0])
        line_z.append(line_r*np.sin(line_t))
        line_z.append([0])
        line_o.append(np.ones_like(line_t))
        line_o.append([0])

    # parallels every 30 degrees (excluding the south pole itself)
    latgrid = np.arange(-90, 90, 30)[1:]
    for lat in latgrid:
        line_x.append(line_r*np.cos(np.deg2rad(lat))*np.cos(line_t))
        line_x.append([0])
        line_y.append(line_r*np.cos(np.deg2rad(lat))*np.sin(line_t))
        line_y.append([0])
        line_z.append(line_r*np.sin(np.deg2rad(lat))*np.ones_like(line_t))
        line_z.append([0])
        line_o.append(np.ones_like(line_t))
        line_o.append([0])

    line_x = np.concatenate(line_x)
    line_y = np.concatenate(line_y)
    line_z = np.concatenate(line_z)
    line_o = np.concatenate(line_o)

    # rotate the grid from IAU_MARS into MSO alongside the planet
    linearray = [np.matmul(rmat, [x, y, z])
                 for x, y, z in zip(line_x, line_y, line_z)]
    (line_x, line_y, line_z) = np.transpose(np.array(linearray))

    grid_linewidth = 0.25*mlab_pix/1000
    mlab.plot3d(line_x, line_y, line_z, line_o,
                transparent=True,
                color=(0, 0, 0),
                tube_radius=None,
                line_width=grid_linewidth)

    #
    # compute the spacecraft orbit
    #

    # for the given time, we determine the orbit period
    maven_state = spice.spkezr('MAVEN', myet,
                               'MAVEN_MME_2000', 'NONE', 'MARS')[0]
    marsmu = spice.bodvrd('MARS', 'GM', 1)[1][0]
    maven_elements = spice.oscltx(maven_state, myet, marsmu)
    orbit_period = 1.001*maven_elements[-1]

    # make an etlist corresponding to the half-orbit ahead and behind
    orbit_subdivisions = 2000
    etlist = (myet
              - orbit_period/2
              + orbit_period*np.linspace(0, 1, num=orbit_subdivisions))

    # get the position of the orbit in MSO
    statelist = spice.spkezr('MAVEN', etlist,
                             'MAVEN_MSO', 'NONE', 'MARS')[0]
    statelist = np.append(statelist, [statelist[0]], axis=0)  # close the orbit
    poslist = np.transpose(statelist)[:3]/mars_radius  # scale to Mars radius

    # plot the orbit
    orbitcolor = np.array([222, 45, 38])/255  # a nice red
    orbitcolor = tuple(orbitcolor)
    maven_x, maven_y, maven_z = poslist
    if show_orbit:
        mlab.plot3d(maven_x, maven_y, maven_z,
                    color=orbitcolor,
                    tube_radius=None,
                    line_width=3*mlab_pix/1000)

    if not parallel_projection:
        # add a dot indicating the location of the Sun
        # this only makes sense with a perspective transform... with
        # orthographic coordinates we're always too far away
        # TODO: non parallel projection results in goofy images
        sun_distance = 10
        sun_sphere = tvtk.SphereSource(center=(sun_distance, 0, 0),
                                       radius=1*np.pi/180*sun_distance,
                                       theta_resolution=sphere_resolution,
                                       phi_resolution=sphere_resolution)
        sun_sphere_mapper = tvtk.PolyDataMapper(
            input_connection=sun_sphere.output_port)
        sun_sphere = tvtk.Actor(mapper=sun_sphere_mapper)
        sun_sphere.property.ambient = 1.0
        sun_sphere.property.lighting = False
        # mfig.scene.add_actor(sun_sphere)

        # put a line along the x-axis towards the sun
        # sunline_x=np.arange(0, 5000, 1)
        # mlab.plot3d(sunline_x, 0*sunline_x, 0*sunline_x,
        #             color=(1.0,1.0,1.0),
        #             tube_radius=None,line_width=6)

    #
    # Define camera coordinates
    #

    if view_from_periapsis:
        # to do this we need to get the position of apoapsis and the
        # orbit normal
        rlist = [np.linalg.norm(p) for p in np.transpose(poslist)]
        apoidx = np.argmax(rlist)
        apostate = spice.spkezr('MAVEN', etlist[apoidx],
                                'MAVEN_MSO', 'NONE', 'MARS')[0]
        camera_pos = -1.0 * apostate[:3]
        camera_pos = 5 * (camera_pos/np.linalg.norm(camera_pos))
        camera_up = np.cross(apostate[:3], apostate[-3:])
        camera_up = camera_up/np.linalg.norm(camera_up)
        parallel_projection = True

    if view_from_orbit_normal:
        # to do this we need to get the position of apoapsis and the
        # orbit normal
        rlist = [np.linalg.norm(p) for p in np.transpose(poslist)]
        apoidx = np.argmax(rlist)
        apostate = spice.spkezr('MAVEN', etlist[apoidx],
                                'MAVEN_MSO', 'NONE', 'MARS')[0]
        camera_up = apostate[:3]
        camera_up = camera_up/np.linalg.norm(camera_up)
        camera_pos = np.cross(apostate[:3], apostate[-3:])
        camera_pos = 5 * (camera_pos/np.linalg.norm(camera_pos))
        parallel_projection = True

    # construct an orthonormal coordinate system
    camera_pos = np.array(camera_pos)
    camera_pos_norm = camera_pos/np.linalg.norm(camera_pos)
    camera_up = (camera_up
                 - camera_pos_norm*np.dot(camera_pos_norm, camera_up))
    camera_up = camera_up/np.linalg.norm(camera_up)
    camera_right = np.cross(-camera_pos_norm, camera_up)

    # set location of camera and orthogonal projection
    camera = mlab.gcf().scene.camera
    if parallel_projection:
        camera_pos = 5*camera_pos_norm
        camera.parallel_projection = True
        camera.parallel_scale = extent  # half box size
    else:
        # TODO: this results in goofy images, fix this
        camera.parallel_projection = False
        camera.view_angle = 50
    camera.position = np.array(camera_pos)
    camera.focal_point = (0, 0, 0)
    camera.view_up = camera_up
    camera.clipping_range = (0.01, 5000)

    #
    # Set up lighting
    #

    # The only light is the Sun, which is fixed on the MSO +x axis.

    # VTK's default lights are uniform and don't fall off with
    # distance, which is what we want
    mfig.scene.light_manager.light_mode = "vtk"
    sun = mfig.scene.light_manager.lights[0]
    sun.activate = True
    sun_vec = (1, 0, 0)

    # The only way to set a light in mayavi/vtk is with respect to the
    # camera position. This means we have to get elevation/azimuth
    # coordinates for the Sun with respect to the camera, which could
    # be anywhere.

    # Here's how the coordinate system is defined:
    # elevation:
    #    [-90 -- +90]
    #    +90 places the light along the direction of camera_up
    # azimuth:
    #    [-180 -- +180],
    #    +90 is in the plane of camera_up and camera_right.
    #    +/-180 is behind, pointing at the camera
    #    -90 places light to the left

    # so, to get elevation we need to put the sun in scene coordinates
    sun_scene = np.matmul([camera_right, camera_up, camera_pos_norm],
                          sun_vec)

    # elevation is the angle is latitude measured wrt the y-axis of
    # the scene
    sun_elevation = np.rad2deg(np.arcsin(np.dot(sun_scene, [0, 1, 0])))

    # azimuth is the angle in the x-z plane, clockwise from the z-axis
    sun_azimuth = np.rad2deg(np.arctan2(sun_scene[0], sun_scene[2]))

    # now we can set the location of the light, computed to always lie
    # along MSO+x
    sun.azimuth = sun_azimuth
    sun.elevation = sun_elevation

    # set the brightness of the Sun based on the ambient lighting of
    # Mars so there is no washing out
    sun.intensity = 1.0 - mars.property.ambient

    #
    # Render the 3D scene
    #
    mfig.scene.disable_render = False
    # mfig.scene.anti_aliasing_frames = 0  # can uncomment to make
    #                                      # rendering faster and uglier
    mlab.show()
    mode = 'rgba' if transparent_background else 'rgb'
    img = mlab.screenshot(mode=mode, antialiased=True)
    mlab.close(all=True)  # 3D stuff ends here

    #
    # Draw text and labels in matplotlib
    #
    fig, ax = plt.subplots(1, 1,
                           dpi=400*mlab_pix/1000,
                           figsize=(2.5, 2.5))
    ax.imshow(img)

    # put an arrow along the orbit direction
    if show_orbit:
        arrow_width = 5
        arrow_length = 1.5*arrow_width

        # by default, draw the arrow at the closest point on the orbit
        # to the viewer
        arrowidx = np.argmax([np.dot(camera_pos_norm, p)
                              for p in np.transpose(poslist)])
        if view_from_periapsis:
            # draw the arrow 45 degrees after periapsis
            arrowidx = np.argmax(
                [np.dot(
                    (camera_right + camera_pos_norm)/np.sqrt(2), p)
                 for p in np.transpose(poslist)])
        if view_from_orbit_normal:
            # draw the arrow 45 degrees after periapsis
            arrowidx = np.argmax(
                [np.dot(
                    (camera_right-camera_up)/np.sqrt(2.), p)
                 for p in np.transpose(poslist)])

        arrowetlist = etlist[arrowidx] + 5*60*np.array([0, 1])
        arrowstatelist = spice.spkezr('MAVEN', arrowetlist,
                                      'MAVEN_MSO', 'NONE', 'MARS')[0]
        arrowdir = arrowstatelist[1][:3] - arrowstatelist[0][:3]
        arrowdirproj = [np.dot(camera_right, arrowdir),
                        np.dot(camera_up, arrowdir)]
        arrowdirproj = arrowdirproj/np.linalg.norm(arrowdirproj)

        arrowloc = np.transpose(poslist)[arrowidx]
        arrowlocproj = np.array([np.dot(camera_right, arrowloc),
                                 np.dot(camera_up, arrowloc)])
        arrowlocdisp = (arrowlocproj + extent)/extent/2
        arrow = ax.annotate('',
                            xytext=(arrowlocdisp - 0.05*arrowdirproj),
                            xy=(arrowlocdisp + 0.05*arrowdirproj),
                            xycoords='axes fraction',
                            textcoords='axes fraction',
                            arrowprops=dict(facecolor=orbitcolor,
                                            edgecolor='none',
                                            width=0,
                                            headwidth=arrow_width,
                                            headlength=arrow_length))

    # label the poles
    if view_from_periapsis:
        label_poles = True
    if view_from_orbit_normal:
        label_poles = True
    if label_poles is None:
        label_poles = False

    if label_poles:
        # label the north and south pole if they are visible
        def label_pole(loc, lbl):
            polepos = np.matmul(rmat, loc)
            poleposproj = np.array([np.dot(camera_right, polepos),
                                    np.dot(camera_up, polepos)])
            poleposdisp = (poleposproj+extent)/extent/2

            # determine if the north pole is visible
            polevis = (not (np.linalg.norm([poleposproj]) < 1
                            and np.dot(camera_pos, polepos) < 0))
            if polevis:
                polelabel = ax.text(*poleposdisp, lbl,
                                    transform=ax.transAxes,
                                    color='#888888',
                                    ha='center', va='center',
                                    size=4, zorder=1)
                # outline the letter
                polelabel.set_path_effects([
                    path_effects.withStroke(linewidth=0.75,
                                            foreground='k')])

        label_pole([0, 0, 1], 'N')
        label_pole([0, 0, -1], 'S')

    if show_orbit:
        # add a mark for periapsis and apoapsis
        rlist = [np.linalg.norm(p) for p in np.transpose(poslist)]

        # find periapsis/apoapsis
        def label_apsis(apsis_fn, label, **kwargs):
            apsisidx = apsis_fn(rlist)
            apsispos = np.transpose(poslist)[apsisidx]
            apsisposproj = np.array([np.dot(camera_right, apsispos),
                                     np.dot(camera_up, apsispos)])
            apsisposdisp = (apsisposproj + extent)/extent/2
            apsisvis = (not (np.linalg.norm([apsisposproj]) < 1
                             and np.dot(camera_pos, apsispos) < 0))

            if apsisvis:
                apsis = mpatches.CirclePolygon(apsisposdisp, 0.015,
                                               resolution=4,
                                               transform=ax.transAxes,
                                               fc=orbitcolor, lw=0,
                                               zorder=10)
                ax.add_patch(apsis)
                ax.text(*apsisposdisp, label,
                        transform=ax.transAxes,
                        color='k',
                        ha='center',
                        size=4, zorder=10,
                        **kwargs)

        label_apsis(np.argmin, 'P', va='center_baseline')
        label_apsis(np.argmax, 'A', va='center')

    if show_maven:
        # add a dot for the spacecraft location
        mavenpos = spice.spkezr('MAVEN', myet,
                                'MAVEN_MSO', 'NONE',
                                'MARS')[0][:3]/mars_radius
        mavenposproj = np.array([np.dot(camera_right, mavenpos),
                                 np.dot(camera_up, mavenpos)])
        mavenposdisp = (mavenposproj + extent)/extent/2
        mavenvis = (not (np.linalg.norm([mavenposproj]) < 1
                         and np.dot(camera_pos, mavenpos) < 0))
        if mavenvis:
            maven = mpatches.Circle(mavenposdisp, 0.012,
                                    transform=ax.transAxes,
                                    fc=orbitcolor, lw=0, zorder=11)
            ax.add_patch(maven)
            ax.text(*mavenposdisp, 'M',
                    transform=ax.transAxes,
                    color='k',
                    ha='center', va='center_baseline',
                    size=4, zorder=11)

    # suppress all whitespace around the plot
    plt.subplots_adjust(top=1, bottom=0, right=1, left=0,
                        hspace=0, wspace=0)
    plt.margins(0, 0)
    ax.set_axis_off()
    ax.xaxis.set_major_locator(plt.NullLocator())
    ax.yaxis.set_major_locator(plt.NullLocator())
    fig.canvas.draw()

    rgb_array = fig2rgb_array(fig)

    if not show:
        plt.close(fig)

    return_coords = {'extent': extent,
                     'scale': '3395 km',
                     'camera_pos': camera_pos,
                     'camera_pos_norm': camera_pos_norm,
                     'camera_up': camera_up,
                     'camera_right': camera_right,
                     'orbit_coords': poslist}

    return rgb_array, return_coords
# Load the DEM and the WRF output file (dem_file / wrf_file come from
# outside this snippet — 'wrf_file' is read from the environment).
ds = xr.open_dataset(dem_file)
nc = Dataset(environ['wrf_file'])

# here I 'fill' the DEM westward with zeros (over the ocean)
dx = ds.lon.diff('lon').mean().item()
i = np.arange(ds.lon.min(), nc['XLONG'][0, :, :].min(), -dx)[:0:-1]
Z = xr.DataArray(np.pad(ds.z, [(0, 0), (len(i), 0)], 'constant'),
                 coords=[('lat', ds.lat), ('lon', np.r_[i, ds.lon])])

# Map lon/lat onto the WRF grid coordinates (create_affine is defined
# elsewhere — presumably returns a lon/lat -> grid-index transform).
affine = create_affine(nc)
x, y = affine(*np.meshgrid(Z.lon, Z.lat))
y = y + nc.dimensions['south_north'].size
x, y = [v.reshape(Z.shape) for v in [x, y]]

# Drape the satellite image (image_file defined elsewhere) over the
# terrain surface; heights converted from m to km.
im = tvtk.JPEGReader(file_name=image_file)
tex = tvtk.Texture(input_connection=im.output_port, interpolate=1)
surf = mlab.surf(x.T, y.T, Z.values.T / 1000, color=(1, 1, 1))
surf.actor.enable_texture = True
surf.actor.tcoord_generator_mode = 'plane'
surf.actor.actor.texture = tex
mlab.view(-120, 60, 200, [55, 55, -9])

# Vertical levels and grid sizes for the volume section that follows
# (top/vspace defined elsewhere in the file).
iz = np.arange(0, top, vspace)
ly, lx, nt = [
    nc.dimensions[n].size for n in ['south_north', 'west_east', 'Time']
]
tr = lambda x: x.transpose(2, 1, 0)
z, y, x = np.mgrid[slice(0, top, vspace), :ly, :lx]
""" Draping an image over a terrain surface """ from osgeo import gdal from tvtk.api import tvtk from mayavi import mlab import Image ds = gdal.Open('dem.tiff') data = ds.ReadAsArray() im1 = Image.open("ortofoto.jpg") im2 = im1.rotate(90) im2.save("/tmp/ortofoto90.jpg") bmp1 = tvtk.JPEGReader() bmp1.file_name="/tmp/ortofoto90.jpg" #any jpeg file my_texture=tvtk.Texture() my_texture.interpolate=0 my_texture.set_input(0,bmp1.get_output()) mlab.figure(size=(640, 800), bgcolor=(0.16, 0.28, 0.46)) surf = mlab.surf(data, color=(1,1,1), warp_scale=0.2) surf.actor.enable_texture = True surf.actor.tcoord_generator_mode = 'plane' surf.actor.actor.texture = my_texture mlab.show()
def mlabimg(
    x,
    y,
    z,
    path,
    figure=None,
    name=None,
    opacity=1.0,
    orientation=(0.0, 0.0, 0.0),
    scale=1.0,
    typ=None,
    ref_y_extent=None,
):
    """
    Render image files in mayavi. Analogous to mlab.text3d.

    Parameters
    ----------
    x : float
        x position of the image.
    y : float
        y position of the image.
    z : float
        z position of the image.
    path : string
        Path to the image file.
    figure : Scene, optional
        Must be a Scene or None.
    name : string, optional
        the name of the vtk object created.
    opacity : float, optional
        The overall opacity of the vtk object. Must be a float.
        Default: 1.0
    orientation : tuple, optional
        the angles giving the orientation of the image. If the image
        is oriented to the camera, these angles are referenced to the
        axis of the camera. If not, these angles are referenced to the
        z axis. Must be an array with shape (3,).
    scale : float, optional
        The vertical scale of the image, in figure units.
    typ : string, optional
        Here you can specify the image type. Supported: 'bmp', 'jpg',
        'jpeg', 'png', 'pnm', 'dcm', 'tiff', 'ximg', 'dem', 'mha',
        'mhd', 'mnc'. If set to ``None``, the file type is determined
        by its extension. Default: None.
    ref_y_extent : int, optional
        Reference vertical extent of the image to scale to. If set to
        ``None``, the image extent itself is used. Default: None

    Returns
    -------
    surf : Surf
        Mayavi ``Surf`` class with the rendered image as texture.

    Raises
    ------
    ValueError
        If the (given or inferred) file type is not in ``IMREAD``.
    """
    # Infer the reader from the file extension when not given explicitly.
    if typ is None:
        typ = os.path.splitext(path)[1][1:].lower()
    if typ not in IMREAD:
        raise ValueError("The file type is not supported: " + str(typ))
    reader = IMREAD[typ]
    # Forward figure/name only when set, so mlab.surf keeps its defaults.
    kwargs = {}
    if figure is not None:
        kwargs["figure"] = figure
    if name is not None:
        kwargs["name"] = name
    # load the image
    img = reader()
    img.file_name = path
    img.update()
    # data_extent is (xmin, xmax, ymin, ymax, ...); take the two maxima.
    dim_x, dim_y = img.data_extent[1:4:2]
    # create the texture from the image
    texture = tvtk.Texture(input_connection=img.output_port, interpolate=0)
    # create the surface points, scaled so the reference vertical extent
    # maps to `scale` figure units
    if ref_y_extent is None:
        ref_y_extent = dim_y
    surfx, surfy = (np.mgrid[0:dim_x + 1, 0:dim_y + 1] * scale / ref_y_extent)
    surfz = np.zeros_like(surfx)
    # create the flat surface carrying the texture
    surf = mlab.surf(surfx, surfy, surfz, color=(1, 1, 1), opacity=opacity,
                     warp_scale=1.0, reset_zoom=False, **kwargs)
    # add texture, position and orientation
    surf.actor.enable_texture = True
    surf.actor.tcoord_generator_mode = "plane"
    surf.actor.actor.texture = texture
    surf.actor.actor.orientation = orientation
    surf.actor.actor.position = (x, y, z)
    return surf
def manual_sphere(image_file):
    """Texture-map a JPEG onto a sphere mesh, animate a full rotation while
    saving one frame per step, then assemble the frames into an mp4.

    Relies on module-level ``out_path``, ``prefix``, ``ext`` and ``fps``
    for frame naming and the ffmpeg encode.
    """
    # caveat 1: flip the input image along its first axis
    img = plt.imread(image_file)  # shape (N,M,3), flip along first dim
    outfile = image_file.replace('.jpg', '_flipped.jpg')
    # flip output along first dim to get right chirality of the mapping
    img = img[::-1, ...]
    plt.imsave(outfile, img)
    image_file = outfile  # work with the flipped file from now on

    # parameters for the sphere
    R = 2  # radius of the sphere
    Nrad = 180  # points along theta and phi
    phi = np.linspace(0, 2 * np.pi, Nrad)  # shape (Nrad,)
    theta = np.linspace(0, np.pi, Nrad)  # shape (Nrad,)
    phigrid, thetagrid = np.meshgrid(phi, theta)  # shapes (Nrad, Nrad)

    # compute actual points on the sphere
    x = R * np.sin(thetagrid) * np.cos(phigrid)
    y = R * np.sin(thetagrid) * np.sin(phigrid)
    z = R * np.cos(thetagrid)

    # create figure
    f = mlab.figure(size=(500, 500), bgcolor=(1, 1, 1))
    # f.scene.movie_maker.record = True

    # create meshed sphere
    mesh = mlab.mesh(x, y, z)
    mesh.actor.actor.mapper.scalar_visibility = False
    mesh.actor.enable_texture = True  # probably redundant assigning the texture later

    # load the (flipped) image for texturing
    img = tvtk.JPEGReader(file_name=image_file)
    texture = tvtk.Texture(input_connection=img.output_port, interpolate=1,
                           repeat=0)
    mesh.actor.actor.texture = texture

    # tell mayavi that the mapping from points to pixels happens via a sphere
    # map is already given for a spherical mapping
    mesh.actor.tcoord_generator_mode = 'sphere'
    cylinder_mapper = mesh.actor.tcoord_generator
    # caveat 2: if prevent_seam is 1 (default), half the image is used to
    # map half the sphere; use 360 degrees, might cause seam but no fake data
    cylinder_mapper.prevent_seam = 0
    # mlab.view(180.0, 90.0, 17.269256680431845, [0.00010503, 0.00011263, 0.])
    mlab.view(180.0, 90.0, 10, [0.00010503, 0.00011263, 0.])

    n_images = 36
    padding = len(str(n_images))
    mlab.roll(90.0)

    @mlab.animate(delay=10, ui=False)
    def anim():
        # One full revolution, saving a zero-padded frame per step.
        for i in range(n_images):
            mesh.actor.actor.rotate_z(360 / n_images)
            zeros = '0' * (padding - len(str(i)))
            filename = os.path.join(out_path,
                                    '{}_{}{}{}'.format(prefix, zeros, i, ext))
            mlab.savefig(filename=filename)
            yield
        mlab.close(all=True)

    # cylinder_mapper.center = np.array([0,0,0])
    # set non-trivial center for the mapping sphere if necessary
    # print(mlab.move())
    a = anim()
    mlab.show()

    ffmpeg_fname = os.path.join(out_path,
                                '{}_%0{}d{}'.format(prefix, padding, ext))
    # BUGFIX: invoke ffmpeg without a shell. The old ``bash -c`` formatted
    # string broke (and was injectable) for paths containing spaces or
    # shell metacharacters.
    subprocess.check_output([
        'ffmpeg', '-f', 'image2', '-r', str(fps), '-i', ffmpeg_fname,
        '-vcodec', 'mpeg4', '-y', '{}.mp4'.format(prefix),
    ])
    # BUGFIX: os.listdir yields bare names; join with out_path so the frame
    # files themselves are removed, not same-named files in the CWD. Also a
    # plain loop instead of a throwaway list comprehension of side effects.
    for fname in os.listdir(out_path):
        if fname.endswith(ext):
            os.remove(os.path.join(out_path, fname))
g = tvtk.Glyph3D(scale_mode='data_scaling_off', vector_mode='use_vector', input=pd) # Note that VTK's vtkGlyph.SetSource is special because it has two # call signatures: SetSource(src) and SetSource(int N, src) (which # sets the N'th source). In tvtk it is represented as both a property # and as a method. Using the `source` property will work fine if all # you want is the first `source`. OTOH if you want the N'th `source` # use get_source(N). g.source = cs.output m = tvtk.PolyDataMapper(input=g.output) a = tvtk.Actor(mapper=m) # Read the texture from image and set the texture on the actor. If # you don't like this image, replace with your favorite -- any image # will do (you must use a suitable reader though). img = tvtk.JPEGReader(file_name='images/masonry.jpg') t = tvtk.Texture(input=img.output, interpolate=1) a.texture = t # Renderwindow stuff and add actor. rw = tvtk.RenderWindow(size=(600, 600)) ren = tvtk.Renderer(background=(0.5, 0.5, 0.5)) rw.add_renderer(ren) rwi = tvtk.RenderWindowInteractor(render_window=rw) ren.add_actor(a) rwi.initialize() rwi.start()
def load_texture(filename):
    """Read the JPEG at *filename* and wrap it in a tvtk texture.

    Interpolation is disabled so the texture keeps a crisp, per-pixel
    (raster) appearance when magnified.
    """
    reader = tvtk.JPEGReader(file_name=filename)
    return tvtk.Texture(input_connection=reader.output_port, interpolate=0)
# create a 2d view of the array ary_2d = ary[:] ary_2d.shape = sz[0] * sz[1], sz[2] img.point_data.scalars = ary_2d else: raise ValueError, "ary must be 3 dimensional." return img sz = (256, 256, 3) array_3d = zeros(sz, uint8) img = image_from_array(array_3d) t = tvtk.Texture(input=img, interpolate=1) a.texture = t # Renderwindow stuff and add actor. rw = tvtk.RenderWindow(size=(600, 600)) ren = tvtk.Renderer(background=(0.1, 0.2, 0.4)) rw.add_renderer(ren) rwi = tvtk.RenderWindowInteractor(render_window=rw) ren.add_actor(a) rwi.initialize() # create a little wave to slide across the image. wave = 1 / sqrt(2 * pi) * exp(-arange(-2, 2, .05)**2 / 2) * 255 # have to use += here because = doesn't respect broadcasting correctly. array_3d[:len(wave)] += wave.astype(uint8)[:, None, None]