Example #1
def doit(plotfile):

    ds = yt.load(plotfile)
    ds.periodicity = (True, True, True)

    field = ('boxlib', 'radial_velocity')
    ds._get_field_info(field).take_log = False

    sc = Scene()

    # add a volume: select a sphere
    center = (0, 0, 0)
    R = (5.e8, 'cm')

    dd = ds.sphere(center, R)

    vol = VolumeSource(dd, field=field)
    vol.use_ghost_zones = True

    sc.add_source(vol)

    # transfer function
    vals = [-5.e6, -2.5e6, -1.25e6, 1.25e6, 2.5e6, 5.e6]
    sigma = 3.e5

    tf = yt.ColorTransferFunction((min(vals), max(vals)))

    tf.clear()
    cm = "coolwarm"
    for v in vals:
        tf.sample_colormap(v, sigma**2, colormap=cm)  #, alpha=0.2)

    sc.get_source(0).transfer_function = tf

    cam = sc.add_camera(ds, lens_type="perspective")
    cam.resolution = (1280, 720)
    cam.position = 1.5 * ds.arr(np.array([5.e8, 5.e8, 5.e8]), 'cm')

    # look toward the center -- we are dealing with an octant
    center = ds.domain_left_edge
    normal = (center - cam.position)
    normal /= np.sqrt(normal.dot(normal))

    cam.switch_orientation(normal_vector=normal, north_vector=[0., 0., 1.])
    cam.set_width(ds.domain_width)

    #sc.annotate_axes()
    #sc.annotate_domain(ds)

    sc.render()
    sc.save("subchandra_test.png", sigma_clip=6.0)
    sc.save_annotated(
        "subchandra_test_annotated.png",
        text_annotate=[[(0.05, 0.05), "t = {}".format(ds.current_time.d),
                        dict(horizontalalignment="left")],
                       [(0.5, 0.95),
                        "Maestro simulation of He convection on a white dwarf",
                        dict(color="y",
                             fontsize="24",
                             horizontalalignment="center")]])
Example #2
def assignValues(redVal, greenVal, blueVal):
    ctf = yt.ColorTransferFunction((-10.0, -5.0))
    ctf.add_layers(8)
    ctf.red.y = redVal
    ctf.green.y = np.asarray([random.random() for _ in range(256)])
    ctf.blue.y = np.asarray([random.random() for _ in range(256)])
    # Alternatively, smooth the channels that were passed in:
    # ctf.red.y = smooth(redVal, 20)
    # ctf.green.y = smooth(greenVal, 20)
    # ctf.blue.y = smooth(blueVal, 20)
    ctf.plot("static/img/result.png")
Example #3
def doit(plotfile):

    ds = yt.load(plotfile)
    ds.periodicity = (True, True, True)

    cm = "coolwarm"

    field = ('boxlib', 'radial_velocity')
    ds._get_field_info(field).take_log = False

    sc = Scene()

    # add a volume: select a sphere
    #center = (0, 0, 0)
    #R = (5.e8, 'cm')

    #dd = ds.sphere(center, R)

    vol = VolumeSource(ds, field=field)
    sc.add_source(vol)

    # transfer function
    vals = [-5.e5, -2.5e5, -1.25e5, 1.25e5, 2.5e5, 5.e5]
    sigma = 3.e4

    tf = yt.ColorTransferFunction((min(vals), max(vals)))

    tf.clear()
    cm = "coolwarm"
    for v in vals:
        tf.sample_colormap(v, sigma**2, colormap=cm)  #, alpha=0.2)

    sc.get_source(0).transfer_function = tf

    cam = sc.add_camera(ds, lens_type="perspective")
    cam.resolution = (1080, 1080)
    cam.position = 1.0 * ds.domain_right_edge

    # look toward the center -- we are dealing with an octant
    center = ds.domain_left_edge
    normal = (center - cam.position)
    normal /= np.sqrt(normal.dot(normal))

    cam.switch_orientation(normal_vector=normal, north_vector=[0., 0., 1.])
    cam.set_width(ds.domain_width)

    sc.camera = cam
    #sc.annotate_axes(alpha=0.05)
    #sc.annotate_domain(ds, color=np.array([0.05, 0.05, 0.05, 0.05]))
    #sc.annotate_grids(ds, alpha=0.05)

    sc.render()
    sc.save("{}_radvel".format(plotfile), sigma_clip=4.0)
Example #4
def ytrender(filename,
             vmin=None,
             vmax=None,
             useLog=False,
             nLayer=10,
             cmap='gist_rainbow',
             colorwidth=0.005,
             phi=0.5,
             theta=1.570796,
             outfile=None,
             usedomain=(0.25, 0.6, 0.6),
             Xrays=768,
             Yrays=512):
    cube = fits.getdata(filename)
    if not vmin:
        vmin = np.percentile(cube, 50)
    if not vmax:
        vmax = np.percentile(cube, 99.5)
    if len(cube.shape) > 3:
        cube = cube.squeeze()
    if not outfile:
        outfile = filename.replace('fits', 'png')
    cube[np.isnan(cube)] = np.nanmin(cube)
    data = dict(density=cube)
    pf = yt.load_uniform_grid(data, cube.shape, 9e16)
    tf = yt.ColorTransferFunction((vmin, vmax))
    c = (pf.domain_right_edge + pf.domain_left_edge) / 2.0
    tf.add_layers(nLayer,
                  w=colorwidth,
                  colormap=cmap,
                  alpha=np.logspace(-1.0, 0, nLayer))
    c = [0.51, 0.51, 0.51]  # centre of the image
    L = [
        np.cos(theta),
        np.sin(theta) * np.cos(phi),
        np.sin(theta) * np.sin(phi)
    ]  # normal vector
    W = usedomain

    cam = pf.camera(c,
                    L,
                    W, (Xrays, Yrays),
                    tf,
                    no_ghost=False,
                    north_vector=[1.0, 0, 0],
                    fields=['density'],
                    log_fields=[useLog])
    image = cam.snapshot(outfile, 8.0)
Example #5
    def __init__(self,data,**kwargs):
        """
        Parameters
        ----------
        data : ndarray
            the data to generate transfer function
        **kwargs : type
            bounds (list):
                min/max for transfer function bounds, defaults to
                [data.min(), data.max()]

        """

        self.data=data[~np.isnan(data)]
        self.bounds=kwargs.get('bounds',[self.data.min(),self.data.max()])
        self.tf=yt.ColorTransferFunction((self.bounds[0],self.bounds[1]))
        self.dvbins= np.linspace(self.bounds[0],self.bounds[1],self.tf.nbins)
        self.dvbins_c=(self.dvbins[0:-1]+self.dvbins[1:])/2 # bin centers
        self.histData,_=self.calcHist()

        return
Example #6
 def setup(self):
     if yt.__version__.startswith('3'):
         self.ds = yt.load(self.dsname)
         self.ad = self.ds.all_data()
         self.field_name = "density"
     else:
         self.ds = load(self.dsname)
         self.ad = self.ds.h.all_data()
         self.field_name = "Density"
     # Warmup hdd
     self.ad[self.field_name]
     if yt.__version__.startswith('3'):
         mi, ma = self.ad.quantities['Extrema'](self.field_name)
         self.tf = yt.ColorTransferFunction(
             (np.log10(mi) + 1, np.log10(ma)))
     else:
         mi, ma = self.ad.quantities['Extrema'](self.field_name)[0]
         self.tf = ColorTransferFunction((np.log10(mi) + 1, np.log10(ma)))
     self.tf.add_layers(5, w=0.02, colormap="spectral")
     self.c = [0.5, 0.5, 0.5]
     self.L = [0.5, 0.2, 0.7]
     self.W = 1.0
     self.Npixels = 512
Example #7
def make_13co_faces(
    ytcube=yt13co,
    outdir='yt_renders_13CO',
    size=1024,
    scale=1200.,
    nframes=180,
    movie=True,
    camera_angle=[0, 0, 1],
    north_vector=[1, 0, 0],
    rot_vector1=[1, 0, 0],
    rot_vector2=[0.5, 0.5, 0.0],
    rot_vector3=[0.0, 0.5, 0.5],
):

    tf = yt.ColorTransferFunction([0, 30], grey_opacity=True)
    #tf.map_to_colormap(0.1,5,colormap='Reds')
    tf.add_gaussian(2, 1, [1.0, 0.8, 0.0, 1.0])
    tf.add_gaussian(3, 2, [1.0, 0.5, 0.0, 1.0])
    tf.add_gaussian(5, 3, [1.0, 0.0, 0.0, 1.0])
    tf.add_gaussian(10, 5, [1.0, 0.0, 0.0, 0.5])
    tf.map_to_colormap(10, 30, colormap=red, scale=1)

    center = ytcube.dataset.domain_dimensions / 2.
    ims = []
    for camera_angle in ([1, 0, 0], [0, 1, 0], [0, 0, 1]):
        cam = ytcube.dataset.h.camera(center,
                                      camera_angle,
                                      scale,
                                      size,
                                      tf,
                                      north_vector=north_vector,
                                      fields='flux')

        im = cam.snapshot()
        ims.append(im)

    save_images(ims, 'render_13co_faceon_1024')
Example #8
def img_onestep(filename):
    # Load the data
    ds = yt.load(filename)
    ad = ds.all_data()

    # Calculate the z position of the box.
    # You can use ds.domain_right_edge[2] instead. However, if a moving window
    # was used in the simulation, the rendering shows some jitter.
    # This is because a cell is added in z at some iterations but not all.
    # These lines calculate this jitter z_shift and remove it from the camera position and focus
    iteration = int(filename[-5:])
    dt = 1. / clight * 1. / np.sqrt(
        (1. / ad['dx'][-1]**2 + 1. / ad['dy'][-1]**2 + 1. / ad['dz'][-1]**2))
    z_front = dt * float(iteration) * clight
    z_shift = z_front - ds.domain_right_edge[2]

    # Create a yt source object for the level1 patch
    if do_patch:
        box_patch = yt.visualization.volume_rendering.render_source.BoxSource(
            left_edge=ds.index.grids[1].LeftEdge +
            np.array([0., 0., z_shift]) * yt.units.meter,
            right_edge=ds.index.grids[1].RightEdge +
            np.array([0., 0., z_shift]) * yt.units.meter,
            color=[1., 0.1, 0.1, .01])

    # Handle 2 populations of particles: beam and plasma electrons
    if do_particles:
        point0 = plot_species(species0, ad, 2, .01, 1.)
        point1 = plot_species(species1, ad, 1, .002, 20.e-6)
    sc = yt.create_scene(ds, field=field)

    # Set camera properties
    cam = sc.camera
    dom_length = ds.domain_width[2].v
    cam.set_width(ds.quan(dom_length, yt.units.meter))
    cam.position = ds.domain_center + camera_position + np.array(
        [0., 0., z_shift]) * yt.units.meter
    cam.focus = ds.domain_center + np.array([0., 0., z_shift]) * yt.units.meter
    cam.resolution = resolution
    # Field rendering properties
    source = sc[0]
    source.set_field(field)
    source.set_log(False)
    source.use_ghost_zones = True
    bounds = (-my_max, my_max)
    tf = yt.ColorTransferFunction(bounds)
    w = (.01 * my_max)**2
    # Define the transfer function for 3d rendering
    # 3 isocontours for negative field values
    # The sharpness of the contour is controlled by argument width
    tf.add_gaussian(-.04 * my_max, width=8 * w, height=[0.1, 0.1, 1.0, 0.02])
    tf.add_gaussian(-.2 * my_max, width=5 * w, height=[0.1, 0.1, 1.0, 0.05])
    tf.add_gaussian(-.6 * my_max, width=w, height=[0.0, 0.0, 1.0, 0.3])
    # 3 isocontours for positive field values
    tf.add_gaussian(.04 * my_max, width=8 * w, height=[1.0, 1.0, 0.2, 0.02])
    tf.add_gaussian(.2 * my_max, width=5 * w, height=[1.0, 1.0, 0.2, 0.05])
    tf.add_gaussian(.6 * my_max, width=w, height=[1.0, 1.0, 0.0, 0.3])
    source.tfh.tf = tf
    source.tfh.bounds = bounds
    source.tfh.set_log(False)
    source.tfh.tf.grey_opacity = True
    if do_particles:
        sc.add_source(point0)
        sc.add_source(point1)
    if do_patch:
        sc.add_source(box_patch)
    sc.save('./img_' + filename[-8:] + '.png', sigma_clip=1.)
Example #9
p.annotate_contour(("openPMD", "absE_x"), ncont=5, clim=(1e12, 1e11))
p.show()

# There are many other annotations and additional parameters for visualization on the yt website
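# (For instance, and purely as an illustration not present in the original
# script: p.annotate_grids() or p.annotate_timestamp() are further callbacks
# available on a SlicePlot.)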

# Plotting time series
print("3: Plotting time series")

# Loading with yt.load and using ? or * wildcards in place of specific characters creates a time series
timeseries = yt.load("data0000??00.h5")

# Get every dataset from timeseries and plot it
for ds in timeseries:
    p = yt.SlicePlot(ds, "y", "E_x")
    p.show()

# Volume Rendering of 3D Data
# As OpenPMD 3D datasets are not tested yet, we use other sample data
# Sample data from http://yt-project.org/data/
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

# This creates and shows a 3D model of the sample data
print("4: Volume Rendering of Example Galaxy")
tf = yt.ColorTransferFunction((-28, -25))
tf.add_layers(4, w=0.03)
cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20.0, 'kpc'),
                512,
                tf,
                no_ghost=False)
cam.show(clip_ratio=4.0)
Example #10
  tfh.set_field('density')
  tfh.set_log(True)
  tfh.set_bounds()
  tfh.build_transfer_function()
  tfh.tf.add_layers(5, colormap='inferno')

  # Grab the first render source and set it to use the new transfer function
  render_source = sc.get_source()
  render_source.transfer_function = tfh.tf
  #source.tfh.tf = tfh
  #source.tfh.bounds = bounds
elif(1):
  # select a continuous distribution for values in the chosen range
  # transparency is linear
  # http://yt-project.org/doc/visualizing/volume_rendering.html
  tf = yt.ColorTransferFunction(np.log10(bounds))

  def linramp(vals, minval, maxval):
        return (vals - vals.min())/(vals.max() - vals.min())

  tf.map_to_colormap(np.log10(1.e-1), np.log10(1.e+3), colormap='inferno',scale_func=linramp)

  source.tfh.tf     = tf
  source.tfh.bounds = bounds

else:
  # transfer function that selects a discrete set of values in the chosen range
  # transparency is linear but it is awful
  # bounds = (1.E-01, 80.0)  # H2 density
  #bounds = (1.E-3, 0.1)      # H2 fraction
Example #11
import yt
import numpy as np

# Follow the simple_volume_rendering cookbook for the first part of this.
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # load data
ad = ds.all_data()
mi, ma = ad.quantities.extrema("density")

# Set up transfer function
tf = yt.ColorTransferFunction((np.log10(mi), np.log10(ma)))
tf.add_layers(6, w=0.05)

# Set up camera parameters
c = [0.5, 0.5, 0.5]  # Center
L = [1, 1, 1]  # Normal Vector
W = 1.0  # Width
Nvec = 512  # Pixels on a side

# Specify a north vector, which helps with rotations.
north_vector = [0., 0., 1.]

# Find the maximum density location, store it in max_c
v, max_c = ds.find_max('density')

# Initialize the Camera
cam = ds.camera(c, L, W, (Nvec, Nvec), tf, north_vector=north_vector)
frame = 0

# Do a rotation over 5 frames
for i, snapshot in enumerate(cam.rotation(np.pi, 5, clip_ratio=8.0)):
    snapshot.write_png('camera_movement_%04i.png' % frame)
    frame += 1
Example #12
def doit(plotfile):

    ds = yt.load(plotfile)
    ds.periodicity = (True, True, True)

    field = ('boxlib', 'density')
    ds._get_field_info(field).take_log = True

    sc = Scene()

    # add a volume: select a sphere
    vol = VolumeSource(ds, field=field)
    vol.use_ghost_zones = True

    sc.add_source(vol)

    # transfer function
    vals = [-1, 0, 1, 2, 3, 4, 5, 6, 7]
    #vals = [0.1, 1.0, 10, 100., 1.e4, 1.e5, 1.e6, 1.e7]
    sigma = 0.1

    tf = yt.ColorTransferFunction((min(vals), max(vals)))

    tf.clear()
    cm = "coolwarm"
    cm = "spectral"
    for v in vals:
        if v < 3:
            alpha = 0.1
        else:
            alpha = 0.5
        tf.sample_colormap(v, sigma**2, colormap=cm, alpha=alpha)

    sc.get_source(0).transfer_function = tf

    # for spherical, youtube recommends an "equirectangular" aspect ratio
    # (2:1), suggested resolution of 8192 x 4096
    # see: https://support.google.com/youtube/answer/6178631?hl=en
    #
    # also see: http://yt-project.org/docs/dev/cookbook/complex_plots.html#various-lens-types-for-volume-rendering
    # the 2:1 is 2*pi in phi and pi in theta
    cam = sc.add_camera(ds, lens_type="spherical")
    #cam.resolution = (8192, 4096)
    cam.resolution = (4096, 2048)

    # look toward the +x initially
    cam.focus = ds.arr(np.array([ds.domain_left_edge[0], 0.0, 0.0]), 'cm')

    # center of the domain -- eventually we might want to do the
    # center of mass
    cam.position = ds.arr(np.array([0.0, 0.0, 0.0]), 'cm')

    # define up
    cam.north_vector = np.array([0., 0., 1.])

    normal = (cam.focus - cam.position)
    normal /= np.sqrt(normal.dot(normal))

    cam.switch_orientation(normal_vector=normal, north_vector=[0., 0., 1.])

    # there is no such thing as a camera width -- the entire volume is rendered
    #cam.set_width(ds.domain_width)

    #sc.annotate_axes()
    #sc.annotate_domain(ds)

    pid = plotfile.split("plt")[1]
    sc.render()
    sc.save("wdmerger_{}_spherical.png".format(pid), sigma_clip=6.0)
Example #13
from yt.visualization.volume_rendering.api import Scene, create_volume_source

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

# create a scene and add volume sources to it

sc = Scene()

# Add density

field = "density"

vol = create_volume_source(ds, field=field)
vol.use_ghost_zones = True

tf = yt.ColorTransferFunction([-28, -25])
tf.clear()
tf.add_layers(4, 0.02, alpha=np.logspace(-3, -1, 4), colormap="winter")

vol.set_transfer_function(tf)
sc.add_source(vol)

# Add temperature

field = "temperature"

vol2 = create_volume_source(ds, field=field)
vol2.use_ghost_zones = True

tf = yt.ColorTransferFunction([4.5, 7.5])
tf.clear()
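# (The example is truncated here; presumably the temperature source is
# finished the same way as the density source above, e.g.
# tf.add_layers(4, 0.02, alpha=np.logspace(-3, -1, 4), colormap="RdBu_r")
# vol2.set_transfer_function(tf)
# sc.add_source(vol2)
# -- this completion is an assumption, not part of the original snippet.)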
Example #14
    }],
    'tf_bounds': [-10., 10.],
    'sigma_clip':
    .4
}
out_dir = './output/' + fname.split('.')[0]  # output directory for figures

# load the model
model = sm.netcdf(fname, settings['interp'])
shortname = fname.split('.')[0]
datafld = settings['interp']['field']
data = {datafld: model.interp['data'][datafld]}  # dict container for yt scene

# add the transfer functions
bnds = settings.get('tf_bounds', [-5, 5])
tf = yt.ColorTransferFunction((bnds[0], bnds[1]))
for TFtoadd in settings['tfs']:
    v = TFtoadd['values']
    if TFtoadd['tftype'] == 'step':
        tf.add_step(v[0], v[1], v[2])
    elif TFtoadd['tftype'] == 'gaussian':
        tf.add_gaussian(v[0], v[1], v[2])

# plot the TF with a histogram
x = np.linspace(bnds[0], bnds[1], tf.nbins)
y = tf.funcs[3].y
w = np.append(x[1:] - x[:-1], x[-1] - x[-2])
colors = np.array([tf.funcs[0].y, tf.funcs[1].y, tf.funcs[2].y,
                   tf.funcs[3].y]).T
fig = plt.figure(figsize=[6, 3])
ax = fig.add_axes([0.2, 0.2, 0.75, 0.75])
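# (The snippet is cut off here; a plausible next step, assuming the usual
# matplotlib approach of drawing the RGBA transfer function as bars, would be
# something like ax.bar(x, y, w, color=colors, edgecolor='none').)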
Example #15
def makemovie(name):
    data_name = 'cut_co/xycut_co/' + name

    ds = yt.load(data_name, nan_mask=0.0)

    sc = yt.create_scene(ds, "intensity")
    source = sc[0]
    sc.annotate_domain(ds, color=[128 / 255, 128 / 255, 128 / 255, 0.01])
    sc.annotate_axes(alpha=0.1)
    source.set_log(False)
    source.tfh.build_transfer_function()
    sc.camera.switch_orientation(normal_vector=[-0.1, -0.1, -9])
    sc.camera.set_resolution((1024, 1024))
    sc.camera.roll(radians(-180))

    header = fits.getheader(data_name)
    data = fits.getdata(data_name)
    obj1 = header['object']
    obj = obj1.strip()

    max1 = ds.r['intensity'].max()

    max = max1 / K

    sig = mad_std(
        data, ignore_nan=True
    )  # Older astropy version does not support ignore_nan argument. Delete it and the code will run.
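    # (A workaround for older astropy, not part of the original script: mask
    # the NaNs by hand first, e.g. sig = mad_std(data[np.isfinite(data)]).)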

    tf2 = yt.ColorTransferFunction((sig, max))

    tf2.add_gaussian(3 * sig, sig / 100, [106 / 255, 90 / 255, 205 / 255, 0.6])

    tf2.add_gaussian(5 * sig, sig / 100, [30 / 255, 144 / 255, 1, 1.0])

    if max > 18 * sig:
        tf2.add_gaussian(0.4 * max, sig / 100, [0, 1, 127 / 255, 1.4])

        tf2.add_gaussian(0.54 * max, sig / 100, [1, 215 / 255, 0, 1.8])

        tf2.add_gaussian(0.68 * max, sig / 100, [1, 0, 0, 2.8])

        tf2.add_gaussian(0.85 * max, sig / 100, [240 / 255, 1, 1, 3.8])

    else:

        tf2.add_gaussian(7 * sig, sig / 100, [0, 1, 127 / 255, 1.4])

        tf2.add_gaussian(9 * sig, sig / 100, [1, 215 / 255, 0, 1.8])

        tf2.add_gaussian(11.5 * sig, sig / 100, [1, 0, 0, 2.8])

        tf2.add_gaussian(13.5 * sig, sig / 100, [240 / 255, 1, 1, 3.8])

    command3 = 'mkdir cut_co/' + obj
    command4 = 'mkdir cut_co/' + obj + '/movie'
    call(command3, shell=True)
    call(command4, shell=True)

    save_place = 'cut_co/' + obj + '/movie'

    source.set_transfer_function(tf2)
    source.tfh.tf = tf2

    #-----------------------------making the colormap------------------------------------------------
    if max > 18 * sig:
        white_patch = mpatches.Patch(color='#F0FFFF', label='0.85 max')
        red_patch = mpatches.Patch(color='#FF0000', label='0.68 max')
        yellow_patch = mpatches.Patch(color='#FFD700', label='0.54 max')
        green_patch = mpatches.Patch(color='#00FF7F', label='0.4 max')
        blue_patch = mpatches.Patch(color='#1E90FF', label='5 $\sigma$')
        purple_patch = mpatches.Patch(color='#6A5ACD', label='3 $\sigma$')
    else:
        white_patch = mpatches.Patch(color='#F0FFFF', label='13.5 $\sigma$')
        red_patch = mpatches.Patch(color='#FF0000', label='11.5 $\sigma$')
        yellow_patch = mpatches.Patch(color='#FFD700', label='9 $\sigma$')
        green_patch = mpatches.Patch(color='#00FF7F', label='7 $\sigma$')
        blue_patch = mpatches.Patch(color='#1E90FF', label='5 $\sigma$')
        purple_patch = mpatches.Patch(color='#6A5ACD', label='3 $\sigma$')

    source.tfh.plot(save_place + '/transfer_function.png')

    img = mpimg.imread(save_place + '/transfer_function.png')
    fig, ax = plt.subplots(figsize=(8, 4))

    imgnew = np.delete(img, np.arange(0, 77), 1)

    imgplot = plt.imshow(imgnew)

    ax.axis('off')

    plt.legend(handles=[
        white_patch, red_patch, yellow_patch, green_patch, blue_patch,
        purple_patch
    ],
               loc=(0.91, 0.4),
               prop={'size': 9})

    plt.savefig('cut_co/colormaps/colormap_' + obj + '.png',
                bbox_inches='tight')

    #----------------------------------------------------------------------------------------------------

    i = 0
    j = 1
    while i < 180:
        sc.camera.yaw(
            radians(rotate_angle_per_time))  #rotate about the north vector
        sc.save(save_place + '/pic' + str(j) + '.png', sigma_clip=4.5)
        i += rotate_angle_per_time
        j += 1


    # zoom in by a factor of 1.07 over the next 9 frames
    k = 0
    l = j
    while k <= 8:
        sc.camera.zoom(1.07)
        sc.camera.yaw(radians(rotate_angle_per_time))
        sc.save(save_place + '/pic' + str(l) + '.png', sigma_clip=4.5)
        k += 1
        l += 1

    a = l
    b = 0
    while b < 360:
        sc.camera.yaw(radians(rotate_angle_per_time))
        sc.save(save_place + '/pic' + str(a) + '.png', sigma_clip=4.5)
        b += rotate_angle_per_time
        a += 1

    num_of_frames = str(a)

    command = "ffmpeg -r " + frames_per_sec + " -f image2 -s 1920x1080 -start_number 1 -i " + save_place + "/pic%d.png -vframes " + num_of_frames + " -vcodec libx264 -crf 15  -b 5000K -vb 20M -pix_fmt yuv420p " + save_place + "/movie.mp4"

    command2 = "ffmpeg -i " + save_place + "/movie.mp4 -vf drawtext=\"fontfile=/Library/Fonts/SignPainter.ttc: \\" + obj + ": fontcolor=white: fontsize=33: x=(w-text_w)/8: y=(h-text_h)/8\" -codec:a copy cut_co/movies/" + obj + ".mp4"

    call(command, shell=True)

    call(command2, shell=True)
Example #16
import yt

ds = yt.load("RD0023/RD0023")
sc = yt.create_scene(ds)

im, sc = yt.volume_render(ds)
cam = sc.camera
sc.camera.resolution = (800, 800)
# Set up the transfer function once, outside the zoom loop; the source is
# already part of the scene, so it does not need to be re-added each step.
source = sc.sources['source_00']
tf = yt.ColorTransferFunction((-31, -26))
tf.add_layers(5, w=0.01)
source.set_transfer_function(tf)
for i in cam.iter_zoom(100.0, 10):
    sc.render()
    sc.save("zoom_%04i.png" % i)

    #sc.camera.set_width(ds.quan(1, 'Mpc'))
#sc.show()
#sc.show(sigma_clip=4)
#sc.save('render_001',sigma_clip=4)
Example #17
def render_13co(
    ytcube=yt13co,
    outdir='yt_renders_13CO',
    size=512,
    scale=1200.,
    nframes=180,
    movie=True,
    camera_angle=[0, 0, 1],
    north_vector=[1, 0, 0],
    rot_vector1=[1, 0, 0],
    rot_vector2=[0.5, 0.5, 0.0],
    rot_vector3=[0.0, 0.5, 0.5],
):

    if not os.path.exists(paths.mpath(outdir)):
        os.makedirs(paths.mpath(outdir))

    tf = yt.ColorTransferFunction([0, 30], grey_opacity=True)
    #tf.map_to_colormap(0.1,5,colormap='Reds')
    tf.add_gaussian(2, 1, [1.0, 0.8, 0.0, 1.0])
    tf.add_gaussian(3, 2, [1.0, 0.5, 0.0, 1.0])
    tf.add_gaussian(5, 3, [1.0, 0.0, 0.0, 1.0])
    tf.add_gaussian(10, 5, [1.0, 0.0, 0.0, 0.5])
    tf.map_to_colormap(10, 30, colormap=red, scale=1)

    center = ytcube.dataset.domain_dimensions / 2.
    cam = ytcube.dataset.h.camera(center,
                                  camera_angle,
                                  scale,
                                  size,
                                  tf,
                                  north_vector=north_vector,
                                  fields='flux')

    im = cam.snapshot()

    #images = [im]

    if movie:
        pb = ProgressBar(nframes * 2 + 30)
        for ii, im in enumerate(
                cam.rotation(2 * np.pi, nframes / 3, rot_vector=rot_vector1)):
            #images.append(im)
            im.write_png(paths.mpath(os.path.join(outdir, "%04i.png" % (ii))),
                         rescale=False)
            pb.update(ii)
        for jj, im in enumerate(
                cam.rotation(2 * np.pi, nframes / 3, rot_vector=rot_vector2)):
            #images.append(im)
            im.write_png(paths.mpath(
                os.path.join(outdir, "%04i.png" % (ii + jj))),
                         rescale=False)
            pb.update(ii + jj)
        for kk, im in enumerate(
                cam.rotation(2 * np.pi, nframes / 3, rot_vector=rot_vector3)):
            #images.append(im)
            im.write_png(paths.mpath(
                os.path.join(outdir, "%04i.png" % (ii + jj + kk))),
                         rescale=False)
            pb.update(ii + jj + kk)

        TheBrick = ytcube.world2yt([0.253, 0.016, 35])
        for LL, snapshot in enumerate(cam.move_to(TheBrick, 15)):
            #images.append(snapshot)
            snapshot.write_png(paths.mpath(
                os.path.join(outdir, '%04i.png' % (ii + jj + kk + LL))),
                               rescale=False)
            pb.update(ii + jj + kk + LL)
        for mm, snapshot in enumerate(cam.zoomin(5, 15)):
            #images.append(snapshot)
            snapshot.write_png(paths.mpath(
                os.path.join(outdir, '%04i.png' % (ii + jj + kk + LL + mm))),
                               rescale=False)
            pb.update(ii + jj + kk + LL + mm)
        for nn, im in enumerate(
                cam.rotation(2 * np.pi, nframes / 3, rot_vector=rot_vector1)):
            #images.append(im)
            im.write_png(paths.mpath(
                os.path.join(outdir,
                             "%04i.png" % (ii + jj + kk + LL + mm + nn))),
                         rescale=False)
            pb.update(ii + jj + kk + LL + mm + nn)
        for oo, im in enumerate(
                cam.rotation(2 * np.pi, nframes / 3, rot_vector=rot_vector2)):
            #images.append(im)
            im.write_png(paths.mpath(
                os.path.join(outdir,
                             "%04i.png" % (ii + jj + kk + LL + mm + nn + oo))),
                         rescale=False)
            pb.update(ii + jj + kk + LL + mm + nn + oo)
        for pp, im in enumerate(
                cam.rotation(2 * np.pi, nframes / 3, rot_vector=rot_vector3)):
            #images.append(im)
            im.write_png(paths.mpath(
                os.path.join(
                    outdir,
                    "%04i.png" % (ii + jj + kk + LL + mm + nn + oo + pp))),
                         rescale=False)
            pb.update(ii + jj + kk + LL + mm + nn + oo + pp)

        #save_images(images, paths.mpath(outdir))

        pipe = make_movie(paths.mpath(outdir))

        # the frame-list accumulation above is commented out, so return the
        # final rendered image instead
        return im
    else:
        return im
Example #18
def img_onestep(filename):

    # Load the data
    ds = yt.load(filename)
    ad = ds.all_data()

    # Calculate the z position of the box.
    # You can use ds.domain_right_edge[2] instead. However, if a moving window
    # was used in the simulation, the rendering shows some jitter.
    # This is because a cell is added in z at some iterations but not all.
    # These lines calculate this jitter z_shift and remove it from the camera position and focus
    iteration = int(filename[-5:])
    dt = 1. / clight * 1. / np.sqrt(
        (1. / ad['dx'][-1]**2 + 1. / ad['dy'][-1]**2 + 1. / ad['dz'][-1]**2))
    z_front = dt * float(iteration) * clight
    z_shift = z_front - ds.domain_right_edge[2]

    # Create a yt source object for the level1 patch
    box_patch = yt.visualization.volume_rendering.render_source.BoxSource(
        left_edge=ds.index.grids[1].LeftEdge +
        np.array([0., 0., z_shift]) * yt.units.meter,
        right_edge=ds.index.grids[1].RightEdge +
        np.array([0., 0., z_shift]) * yt.units.meter,
        color=[1., 0.1, 0.1, .01])

    # Handle 2 populations of particles: beam and plasma electrons
    # Color for each of these particles
    colors0_vect = [1., 1., 1., .05]  # the last value is overwritten later
    colors1_vect = [1., 1., 1., .05]  # the last value is overwritten later
    # particle0: read data and create a yt source object
    x0 = ad['particle0', 'particle_position_x'].v
    y0 = ad['particle0', 'particle_position_y'].v
    z0 = ad['particle0', 'particle_position_z'].v
    vertices0 = np.column_stack((x0, y0, z0))
    colors0 = np.tile(colors0_vect, (vertices0.shape[0], 1))
    colors0[:, 3] = .01
    point0 = yt.visualization.volume_rendering.render_source.PointSource(
        vertices0, colors=colors0, radii=2)
    # particle1: read data and create a yt source object
    x1 = ad['particle1', 'particle_position_x'].v
    y1 = ad['particle1', 'particle_position_y'].v
    z1 = ad['particle1', 'particle_position_z'].v
    # select only some particles
    selector = np.abs(x1) < .1e-6
    x1 = x1[selector]
    y1 = y1[selector]
    z1 = z1[selector]
    vertices1 = np.column_stack((x1, y1, z1))
    colors1 = np.tile(colors1_vect, (vertices1.shape[0], 1))
    colors1[:, 3] = .002
    point1 = yt.visualization.volume_rendering.render_source.PointSource(
        vertices1, colors=colors1, radii=1)

    # Set the field rendering and camera attributes
    # Max field in the simulation. This is easy to get from a single image, but
    # it must be set by hand for a video
    my_max = 500000000000
    sc = yt.create_scene(ds, field='Ez')
    # Set camera properties
    cam = sc.camera
    cam.set_width(ds.quan(35, yt.units.micrometer))
    cam.position = ds.domain_center + np.array([
        15., 20., -5.
    ]) * yt.units.micrometer + np.array([0., 0., z_shift]) * yt.units.meter
    cam.focus = ds.domain_center + np.array([0., 0., z_shift]) * yt.units.meter
    cam.resolution = (2048, 2048)
    # Field rendering properties
    source = sc[0]
    source.set_field('Ez')
    source.set_log(False)
    source.use_ghost_zones = True
    bounds = (-my_max, my_max)
    tf = yt.ColorTransferFunction(bounds)
    w = (.01 * my_max)**2
    # Define the transfer function for 3d rendering
    # 3 isocontours for negative field values
    # The sharpness of the contour is controlled by argument width
    tf.add_gaussian(-.04 * my_max, width=8 * w, height=[0.1, 0.1, 1.0, 0.02])
    tf.add_gaussian(-.2 * my_max, width=5 * w, height=[0.1, 0.1, 1.0, 0.05])
    tf.add_gaussian(-.6 * my_max, width=w, height=[0.0, 0.0, 1.0, 0.3])
    # 3 isocontours for positive field values
    tf.add_gaussian(.04 * my_max, width=8 * w, height=[1.0, 1.0, 0.2, 0.02])
    tf.add_gaussian(.2 * my_max, width=5 * w, height=[1.0, 1.0, 0.2, 0.05])
    tf.add_gaussian(.6 * my_max, width=w, height=[1.0, 1.0, 0.0, 0.3])
    source.tfh.tf = tf
    source.tfh.bounds = bounds
    source.tfh.set_log(False)
    source.tfh.tf.grey_opacity = True
    # Plot user-defined sources (here, 2 species for particles and 1 box)
    sc.add_source(point0)
    sc.add_source(point1)
    sc.add_source(box_patch)
    # Save file
    sc.save(filename + '_quarter.png', sigma_clip=1.)
    return 0
Example #19
def doit(plotfile):

    ds = CastroDataset(plotfile)
    ds._periodicity = (True, True, True)

    field = ('boxlib', 'enuc')
    ds._get_field_info(field).take_log = True

    sc = Scene()

    # add a volume: select a sphere
    #center = (0, 0, 0)
    #R = (5.e8, 'cm')

    #dd = ds.sphere(center, R)

    vol = create_volume_source(ds.all_data(), field=field)
    sc.add_source(vol)

    # transfer function
    vals = [16.5, 17.0, 17.5, 18.0, 18.5, 19.0, 19.5]
    sigma = 0.1

    tf = yt.ColorTransferFunction((min(vals), max(vals)))

    tf.clear()

    cmap = "viridis"

    for v in vals:
        if v < 19.0:
            alpha = 0.25
        else:
            alpha = 0.75

        tf.sample_colormap(v, sigma**2, alpha=alpha, colormap=cmap)

    sc.get_source(0).transfer_function = tf

    cam = sc.add_camera(ds, lens_type="perspective")
    cam.resolution = (1920, 1280)

    # view 1

    cam.position = [
        0.75 * ds.domain_right_edge[0],
        0.5 * (ds.domain_left_edge[1] + ds.domain_right_edge[1]),
        ds.domain_right_edge[2]
    ]  # + 0.25 * (ds.domain_right_edge[2] - ds.domain_left_edge[2])]

    # look toward the center
    center = 0.5 * (ds.domain_left_edge + ds.domain_right_edge)
    # set the center in the vertical direction to be the height of the underlying base layer
    center[-1] = 2000 * cm

    normal = (center - cam.position)
    normal /= np.sqrt(normal.dot(normal))

    cam.switch_orientation(normal_vector=normal, north_vector=[0., 0., 1.])
    cam.set_width(3 * ds.domain_width)
    cam.zoom(3.0)
    sc.camera = cam

    sc.save_annotated(
        "{}_Hnuc_annotated_side.png".format(plotfile),
        text_annotate=[[(0.05, 0.05), "t = {}".format(ds.current_time.d),
                        dict(horizontalalignment="left")],
                       [(0.5, 0.95),
                        "Castro simulation of XRB flame spreading",
                        dict(color="y",
                             fontsize="24",
                             horizontalalignment="center")]])

    # view 2

    dx = ds.domain_right_edge[0] - ds.domain_left_edge[0]
    cam.position = [
        0.5 * (ds.domain_left_edge[0] + ds.domain_right_edge[0]) + 0.0001 * dx,
        0.5 * (ds.domain_left_edge[1] + ds.domain_right_edge[1]),
        ds.domain_right_edge[2]
    ]  # + 0.25 * (ds.domain_right_edge[2] - ds.domain_left_edge[2])]

    # look toward the center
    center = 0.5 * (ds.domain_left_edge + ds.domain_right_edge)
    # set the center in the vertical direction to be the height of the underlying base layer
    center[-1] = 2000 * cm

    normal = (center - cam.position)
    normal /= np.sqrt(normal.dot(normal))

    cam.switch_orientation(normal_vector=normal, north_vector=[0., 0., 1.])
    cam.set_width(3 * ds.domain_width)
    cam.zoom(0.6)
    sc.camera = cam

    sc.save_annotated(
        "{}_Hnuc_annotated_top.png".format(plotfile),
        text_annotate=[[(0.05, 0.05), "t = {}".format(ds.current_time.d),
                        dict(horizontalalignment="left")],
                       [(0.5, 0.95),
                        "Castro simulation of XRB flame spreading",
                        dict(color="y",
                             fontsize="24",
                             horizontalalignment="center")]])
Example #20
	data = dict(density=array)
	bbox = np.array([[0., 1.0], [0, 1.0], [0., 1.0]])
	ds = yt.load_uniform_grid(data, array.shape,length_unit="cm",bbox=bbox)
	ad = ds.all_data()
	sc = yt.create_scene(ds, field="density")
	sc.camera.width = (0.3,'cm')
	alpha = i/(365.0-36.0)*2*math.pi
	sc.camera.position = np.array([0.5,0.5,0.5])+np.array([np.cos(alpha),0,np.sin(alpha)])
	sc.camera.focus = np.array([0.5,0.5,0.5])
	sc.camera.north_vector = np.array([0,1.0,0])
# sc.camera.focus = np.array(np.unravel_index(array.argmax(),array.shape))/406.0

	source =sc[0]
	
	# Choose bounds and levels wisely for maximum variation
	
	bounds = (-1, 1)

	tf = yt.ColorTransferFunction(bounds)

	tf.sample_colormap(0., w=.005, alpha=0.2, colormap='RAINBOW')
	tf.sample_colormap(0.8, w=.005, alpha=0.2, colormap='RAINBOW')
	tf.sample_colormap(-0.8, w=.005, alpha=0.2, colormap='RAINBOW')
	
	

	source.tfh.tf = tf
	source.tfh.bounds = bounds


	sc.save(root+'PhaseMovie/Phase_rotrendering%03d.png'%i,sigma_clip=4.0)
Example #21
    data['dvs'] = np.flipud(data['dvs'])
    # data['dvs']=np.transpose(data['dvs'],axes=[0,1,2])
    bbox = np.array([x, y, z])

    # yt spherical expects R, theta, phi (depth, ~lat,~lon)
    sc_mult = 1.0  # scale multiplier
    # bbox = model.cart['bbox']
    ds = yt.load_uniform_grid(data,
                              data['dvs'].shape,
                              sc_mult,
                              bbox=bbox,
                              nprocs=1,
                              periodicity=(False, False, False),
                              unit_system="mks")

    sc = yt.create_scene(ds, 'dvs')
    sc.camera.set_width(ds.quan(15 * 1000., 'm'))
    # sc.camera.rotate(10*np.pi/180.)
    # sc.camera.roll(-5*np.pi/180)
    # sc.camera.yaw(-5*np.pi/180)
    # cam = sc.add_camera()
    # cam.resolution=1000
    tf = yt.ColorTransferFunction((0, 3))
    tf.add_layers(10, w=0.01)

    # Set the bounds of the transfer function
    source = sc.sources['source_00']
    source.set_transfer_function(tf)
    source.tfh.set_log(False)
    sc.save(os.path.join(out_dir, 'YS_volume.png'), sigma_clip=.25)
Example #22
def doit(plotfile):

    ds = yt.load(plotfile)
    ds.periodicity = (True, True, True)

    field = ('boxlib', 'density')
    ds._get_field_info(field).take_log = True

    sc = Scene()

    # add a volume: select a sphere
    vol = VolumeSource(ds, field=field)
    vol.use_ghost_zones = True

    sc.add_source(vol)

    # transfer function
    vals = [-1, 0, 1, 2, 3, 4, 5, 6, 7]
    #vals = [0.1, 1.0, 10, 100., 1.e4, 1.e5, 1.e6, 1.e7]
    sigma = 0.1

    tf = yt.ColorTransferFunction((min(vals), max(vals)))

    tf.clear()
    cm = "coolwarm"
    cm = "spectral"
    for v in vals:
        if v < 3:
            alpha = 0.1
        else:
            alpha = 0.5
        tf.sample_colormap(v, sigma**2, colormap=cm, alpha=alpha)

    sc.get_source(0).transfer_function = tf

    cam = sc.add_camera(ds, lens_type="perspective")
    cam.resolution = (1920, 1080)
    cam.position = 1.5 * ds.arr(np.array([0.0, 5.e9, 5.e9]), 'cm')

    # look toward the center -- we are dealing with an octant
    center = 0.5 * (ds.domain_left_edge + ds.domain_right_edge)
    normal = (center - cam.position)
    normal /= np.sqrt(normal.dot(normal))

    cam.switch_orientation(normal_vector=normal, north_vector=[0., 0., 1.])
    cam.set_width(ds.domain_width)

    #sc.annotate_axes()
    #sc.annotate_domain(ds)

    pid = plotfile.split("plt")[1]
    sc.render()
    sc.save("wdmerger_{}_new.png".format(pid), sigma_clip=6.0)
    sc.save_annotated(
        "wdmerger_annotated_{}_new.png".format(pid),
        text_annotate=
        [[(0.05, 0.05), "t = {:.3f} s".format(float(ds.current_time.d)),
          dict(horizontalalignment="left")],
         [(0.5, 0.95),
          "Castro simulation of merging white dwarfs (0.6 $M_\odot$ + 0.9 $M_\odot$)",
          dict(color="y", fontsize="22", horizontalalignment="center")],
         [(0.95, 0.05), "M. Katz et al.",
          dict(color="w", fontsize="16", horizontalalignment="right")]])
Example #23
def render_chem(yth2co321=yth2co321,
                yth2co303=yth2co303,
                ytsio=ytsio,
                outdir='yt_renders_chem3',
                size=512,
                scale=1100.,
                nframes=60,
                north_vector=[1, 0, 0],
                rot_vector=[1, 0, 0],
                movie=True,
                camera_angle=[-0.6, 0.4, 0.6]):

    if not os.path.exists(paths.mpath(outdir)):
        os.makedirs(paths.mpath(outdir))

    tf1 = yt.ColorTransferFunction([0.1, 2], grey_opacity=True)
    #tf1.add_gaussian(0.1,0.05, [1,0,0,1])
    #tf1.map_to_colormap(0, 0.5, scale=1, colormap='Reds')
    tf1.add_gaussian(0.25, 0.01, [1, 0, 0, 1])
    tf1.add_step(0.25, 0.5, [1, 0, 0, 1])
    #tf1.add_step(1,2,[1,1,1,1])
    tf1.map_to_colormap(0.5, 2, scale=1, colormap=red)
    tf2 = yt.ColorTransferFunction([0.1, 2], grey_opacity=True)
    #tf2.add_gaussian(0.1,0.05, [0,1,0,1])
    #tf2.map_to_colormap(0, 0.5, scale=1, colormap='Greens')
    tf2.add_gaussian(0.25, 0.01, [0, 1, 0, 1])
    tf2.add_step(0.25, 0.5, [0, 1, 0, 1])
    tf2.map_to_colormap(0.5, 2, scale=1, colormap=green)
    tf3 = yt.ColorTransferFunction([0.1, 2], grey_opacity=True)
    #tf3.add_gaussian(0.1,0.05, [0,0,1,1])
    #tf3.map_to_colormap(0, 0.5, scale=1, colormap='Blues')
    tf3.add_gaussian(0.25, 0.01, [0, 0, 1, 1])
    tf3.add_step(0.25, 0.5, [0, 0, 1, 1])
    tf3.map_to_colormap(0.5, 2, scale=1, colormap=blue)

    center = yth2co303.dataset.domain_dimensions / 2.
    camh2co303 = yth2co303.dataset.h.camera(center,
                                            camera_angle,
                                            scale,
                                            size,
                                            tf3,
                                            north_vector=north_vector,
                                            fields='flux')
    camh2co321 = yth2co321.dataset.h.camera(center,
                                            camera_angle,
                                            scale,
                                            size,
                                            tf2,
                                            north_vector=north_vector,
                                            fields='flux')
    camsio = ytsio.dataset.h.camera(center,
                                    camera_angle,
                                    scale,
                                    size,
                                    tf1,
                                    north_vector=north_vector,
                                    fields='flux')

    imh2co303 = camh2co303.snapshot()
    imh2co321 = camh2co321.snapshot()
    imsio = camsio.snapshot()

    pl.figure(1)
    pl.clf()
    pl.imshow(imh2co303 + imh2co321 + imsio)
    pl.figure(2)
    pl.clf()
    pl.imshow(imh2co303[:, :, :3] + imh2co321[:, :, :3] + imsio[:, :, :3])

    if movie:
        images_h2co303 = [imh2co303]
        images_h2co321 = [imh2co321]
        images_sio = [imsio]

        r1 = camh2co303.rotation(2 * np.pi, nframes, rot_vector=rot_vector)
        r2 = camh2co321.rotation(2 * np.pi, nframes, rot_vector=rot_vector)
        r3 = camsio.rotation(2 * np.pi, nframes, rot_vector=rot_vector)
        pb = ProgressBar(nframes * 3)
        for (ii, (imh2co303, imh2co321, imsio)) in enumerate(izip(r1, r2, r3)):
            images_h2co303.append(imh2co303)
            images_h2co321.append(imh2co321)
            images_sio.append(imsio)

            imh2co303 = imh2co303.swapaxes(0, 1)
            imh2co303.write_png(paths.mpath(
                os.path.join(outdir, "h2co303_%04i.png" % (ii))),
                                rescale=False)
            pb.update(ii * 3)
            imsio = imsio.swapaxes(0, 1)
            imsio.write_png(paths.mpath(
                os.path.join(outdir, "sio_%04i.png" % (ii))),
                            rescale=False)
            pb.update(ii * 3 + 1)
            imh2co321 = imh2co321.swapaxes(0, 1)
            imh2co321.write_png(paths.mpath(
                os.path.join(outdir, "h2co321_%04i.png" % (ii))),
                                rescale=False)
            pb.update(ii * 3 + 2)

        pb.next()

        save_images([
            i1 + i2 + i3
            for i1, i2, i3 in izip(images_h2co303, images_h2co321, images_sio)
        ], paths.mpath(outdir))

        make_movie(paths.mpath(outdir))

        return images_h2co303, images_h2co321, images_sio
    else:
        return imh2co303, imh2co321, imsio
Example #24
# We begin by loading up yt, and importing the AMRKDTree
import numpy as np

import yt
from yt.utilities.amr_kdtree.api import AMRKDTree

# Load up a dataset and define the kdtree
ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
kd = AMRKDTree(ds)

# Print out specifics of KD Tree
print("Total volume of all bricks = %i" % kd.count_volume())
print("Total number of cells = %i" % kd.count_cells())

# Define a camera and take a volume rendering.
tf = yt.ColorTransferFunction((-30, -22))
cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
                  tf, volume=kd)
tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], colormap='RdBu_r')
cam.snapshot("v1.png", clip_ratio=6.0)

# This rendering is okay, but lets say I'd like to improve it, and I don't want
# to spend the time rendering the high resolution data.  What we can do is
# generate a low resolution version of the AMRKDTree and pass that in to the
# camera.  We do this by specifying a maximum refinement level of 6.

kd_low_res = AMRKDTree(ds, max_level=6)
print(kd_low_res.count_volume())
print(kd_low_res.count_cells())

# Now we pass this in as the volume to our camera, and render the snapshot
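# (The recipe is truncated here. A minimal sketch of that final step, assuming
# the same old-style camera interface used above, might look like:)
# cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
#                 tf, volume=kd_low_res)
# cam.snapshot("v2.png", clip_ratio=6.0)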
Example #25
                          length_unit="Mpc",
                          bbox=bbox,
                          nprocs=64)

#slc = yt.SlicePlot(ds, "z", ["density"])
#slc.set_cmap("density", "Blues")
#slc.annotate_grids(cmap=None)
#slc.show()

#Find the min and max of the field
mi, ma = ds.all_data().quantities.extrema('density')
##Reduce the dynamic range
#mi = mi.value + 1.5e7
#ma = ma.value - 0.81e7

tf = yt.ColorTransferFunction((mi, ma), )

# Choose a vector representing the viewing direction.
L = [0.5, 0.5, 0.5]
# Define the center of the camera to be the domain center
c = ds.domain_center[0]
# Define the width of the image
W = 1.5 * ds.domain_width[0]
# Define the number of pixels to render
Npixels = 512

cam = ds.camera(c,
                L,
                W,
                Npixels,
                tf,
Example #26
annotateddir = os.path.join(figuredir, 'annotated')
tfdir = os.path.join(figuredir, 'transfer_function')

if yt.is_root():
    for subdir in [figuredir, tfdir, annotateddir]:
        if not os.path.exists(subdir):
            os.mkdir(subdir)

#fname = '/home/ychen/d9/FLASH4/stampede/0529_L45_M10_b1_h1/MHD_Jet_hdf5_plt_cnt_0620'
#ds = yt.load(fname)

bounds = (2e7, 3e9)

# Since this rendering is done in log space, the transfer function needs
# to be specified in log space.
tf = yt.ColorTransferFunction(np.log10(bounds))

tf.sample_colormap(np.log10(2E9), 0.005, alpha=0.1, colormap="arbre")
#tf.sample_colormap(np.log10(1E9), 0.005, alpha=0.5, colormap="arbre")
#tf.sample_colormap(np.log10(6E8), 0.005, alpha=0.1,  colormap="arbre")
tf.sample_colormap(np.log10(7.5E8), 0.005, alpha=0.1, colormap="arbre")
tf.sample_colormap(np.log10(2.5E8), 0.005, alpha=0.1, colormap="arbre")
tf.sample_colormap(np.log10(1E8), 0.005, alpha=0.1, colormap="arbre")
tf.sample_colormap(np.log10(3.5E7), 0.005, alpha=0.01, colormap="arbre")

for ds in ts.piter():
    sc = yt.create_scene(ds, field='temperature', lens_type='plane-parallel')

    render_source = sc.get_source(0)
    render_source.transfer_function = tf
    render_source.tfh.tf = tf
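    # (Truncated here; presumably each scene in the time series is then
    # rendered and saved, e.g. something like
    # sc.save(os.path.join(figuredir, '%s.png' % ds), sigma_clip=4) -- an
    # assumption, not part of the original snippet.)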
Example #27
# First we use the simple_volume_rendering.py recipe from above to generate
# a standard volume rendering.  The only difference is that we use
# grey_opacity=True with our TransferFunction, as the colored background
# functionality requires images with an opacity between 0 and 1.

# We have removed all the comments from the volume rendering recipe for
# brevity here, but consult the recipe for more details.

import yt
import numpy as np

ds = yt.load("Enzo_64/DD0043/data0043")
ad = ds.all_data()
mi, ma = ad.quantities.extrema("density")
tf = yt.ColorTransferFunction((np.log10(mi) + 1, np.log10(ma)),
                              grey_opacity=True)
tf.add_layers(5, w=0.02, colormap="spectral")
c = [0.5, 0.5, 0.5]
L = [0.5, 0.2, 0.7]
W = 1.0
Npixels = 512
cam = ds.camera(c, L, W, Npixels, tf)
im = cam.snapshot("original.png", clip_ratio=8.0)

# Our image array can now be transformed to include different background
# colors.  By default, the background color is black.  The following
# modifications can be used on any image array.

# write_png accepts a background keyword argument that defaults to 'black'.
# Other choices include:
# black (0.,0.,0.,1.)
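# For instance (a sketch, not from the truncated original), a background can
# be passed directly when writing the image:
#   im.write_png("vr_white_bg.png", background=[1., 1., 1., 1.])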
Example #28
def img_onestep(filename):
    ds = yt.load( filename )
    ad = ds.all_data()
    iteration=int(filename[-5:])
    sc = yt.create_scene(ds, field='Ez')
    if use_moving_window:
        z_shift = jitter_shift(ds, ad, cfl, iteration)
    else:
        z_shift = 0.
    array_shift = z_shift * np.array([0., 0., 1.])
    if plot_mr_patch:
        box_patch = yt.visualization.volume_rendering.render_source.BoxSource(
            left_edge =ds.index.grids[1].LeftEdge +array_shift,
            right_edge=ds.index.grids[1].RightEdge+array_shift,
            color=[1.,0.1,0.1,.01] )
        sc.add_source(box_patch)
    ########################
    ### volume rendering ###
    ########################
    source = sc[0]
    source.use_ghost_zones = True
    source.grey_opacity = True
    source.set_log(False)
    tf = yt.ColorTransferFunction(bounds)
    if rendering_type == 'smooth':
        tf.add_gaussian(-my_max/4, width=15**2*w,  height=[0.0, 0.0, 1.0, 1])
        tf.add_gaussian( my_max/4, width=15**2*w,  height=[1.0, 0.0, 0.0, 1])
    if rendering_type == 'layers':
        # NEGATIVE
        tf.add_gaussian(-.04 *my_max, width=8*w,  height=[0.1, 0.1, 1.0, 0.2])
        tf.add_gaussian(-.2 *my_max, width=5*w,  height=[0.1, 0.1, 1.0, 0.5])
        tf.add_gaussian(-.6 *my_max, width=w,  height=[0.0, 0.0, 1.0, 1.])
        # POSITIVE
        tf.add_gaussian(.04 *my_max, width=8*w,  height=[1.0, 1.0, 0.2, 0.2])
        tf.add_gaussian(.2 *my_max, width=5*w,  height=[1.0, 1.0, 0.2, 0.5])
        tf.add_gaussian(.6 *my_max, width=w,  height=[1.0, 1.0, 0.0, 1.])

    ######################
    ### plot particles ###
    ######################
    species_points = {}
    for species in species_to_plot:
        species_points[ species ] = get_species_ytpoints(ad,
                                        species, species_colors[species])
        sc.add_source( species_points[ species ] )
    source.tfh.tf = tf
    source.tfh.bounds = bounds
    #########################
    ### camera properties ###
    #########################
    cam = sc.camera
    cam.resolution = (2048, 2048)
    cam.width = .00018*yt.units.meter
    cam.focus = ds.domain_center + \
                np.array([0., 0., 10.e-6 ])*yt.units.meter + \
                array_shift
    cam.position = ds.domain_center + \
                np.array([15., 15., -5.  ])*yt.units.micrometer + \
                array_shift
    cam.normal_vector = [-0.3, -0.3, -.2]
    cam.switch_orientation()
    # save image
    if rendering_type == 'smooth':
        sc.save('img_' + str(my_number_list[count]).zfill(5), sigma_clip=5.)
    if rendering_type == 'layers':
        sc.save('img_' + str(my_number_list[count]).zfill(5), sigma_clip=2.)
Example #29
def visualize_tables(table_path,
                     geom_file,
                     slices=True,
                     projections=True,
                     plot_3d=True):
    tables_dir = dirname(table_path)
    table_fname = basename(table_path)
    tables_basename = table_fname
    for name in [
            'survival_prob', 'avg_photon_x', 'avg_photon_y', 'avg_photon_z'
    ]:
        tables_basename = tables_basename.replace('_' + name + '.fits', '')
    if tables_basename[-1] == '_':
        tables_basename = tables_basename[:-1]

    det_string_depth_xyz = np.load(geom_file)

    num_doms_in_detector = np.prod(det_string_depth_xyz.shape[:2])

    data = {}
    fname = '%s_survival_prob.fits' % tables_basename
    fpath = join(tables_dir, fname)
    with pyfits.open(fpath) as fits_file:
        survival_prob = fits_file[0].data
        ma = survival_prob.max()
        print('Max survival probability         :', ma)
        mi = survival_prob.min()
        print('Min survival probability         :', mi)
        mi_nonzero = survival_prob[survival_prob != 0].min()
        print('Min non-zero survival probability:', mi_nonzero)
        data['density'] = (survival_prob, 'kg/m**3')
        xyz_shape = fits_file[1].data
        lims = fits_file[2].data
        doms_used = fits_file[3].data

        # If 3D, dims represent: (string numbers, depth indices, (x, y, z))
        if len(doms_used.shape) == 3:
            doms_used = np.stack(
                (doms_used[:, :, 0].flatten(), doms_used[:, :, 1].flatten(),
                 doms_used[:, :, 2].flatten())).T

        nx, ny, nz = xyz_shape
        xlims = lims[0, :]
        ylims = lims[1, :]
        zlims = lims[2, :]
        print('x lims:', xlims)
        print('y lims:', ylims)
        print('z lims:', zlims)
        print('(nx, ny, nz):', xyz_shape)
        num_doms_used = doms_used.shape[0]
        print('num doms used:', num_doms_used)
        print('doms used:', doms_used.shape)

    if slices:
        mask = survival_prob > 0  #(ma / 10000000)
        avg_photon_info = {}
        for dim in ['x', 'y', 'z']:
            fname = '%s_avg_photon_%s.fits' % (tables_basename, dim)
            fpath = join(tables_dir, fname)
            with pyfits.open(fpath) as fits_file:
                d = np.zeros_like(survival_prob)
                d[mask] = fits_file[0].data[mask]
                #d = -fits_file[0].data
                avg_photon_info[dim] = d
                data['velocity_' + dim] = (d, 'm/s')
        avg_photon_info = Cart3DCoord(**avg_photon_info)
        del mask

    bbox = lims
    ds = yt.load_uniform_grid(data,
                              domain_dimensions=(nx, ny, nz),
                              bbox=bbox,
                              nprocs=4)

    savefig_kw = dict(name=join(tables_dir, tables_basename),
                      suffix='png',
                      mpl_kwargs=dict(dpi=300))
    plots = []

    sphere_kwargs = dict(radius=(5 * DOM_RADIUS_M, 'cm'),
                         coord_system='data',
                         circle_args=dict(color=(0, 0.8, 0),
                                          linewidth=1,
                                          alpha=0.3))

    if projections:
        for normal in ['x', 'y', 'z']:
            prj = yt.ProjectionPlot(ds, normal, 'density')
            prj.set_log('density', False)
            prj.set_cmap('density', 'inferno')

            # Display all doms in the detector
            for depth_xyz in det_string_depth_xyz:
                for xyz in depth_xyz:
                    prj.annotate_sphere(xyz, **sphere_kwargs)

            # Display only doms used (if subset of the detector)
            if num_doms_used != num_doms_in_detector:
                kw = deepcopy(sphere_kwargs)
                kw['radius'] = (15 * DOM_RADIUS_M, 'cm')
                kw['circle_args']['alpha'] = 1
                for depth_xyz in doms_used:
                    prj.annotate_sphere(depth_xyz, **kw)

            prj.save(**savefig_kw)
            plots.append(prj)

    if plot_3d:
        # Choose a vector representing the viewing direction.
        L = [-0.5, -0.5, -0.5]

        # Define the center of the camera to be the domain center
        c = ds.domain_center[0]
        #c = (1400*100, 1300*100, 1300*100)

        # Define the width of the image
        W = 1.0 * ds.domain_width[0]

        # Define the number of pixels to render
        Npixels = 2048

        sc = yt.create_scene(ds, 'density')
        source = sc[0]
        source.log_field = False

        tf = yt.ColorTransferFunction((0, ma), grey_opacity=True)
        tf.map_to_colormap(0, ma, scale=1.0, colormap='inferno')

        source.set_transfer_function(tf)

        sc.add_source(source)

        cam = sc.add_camera()
        cam.width = W
        cam.center = c
        cam.normal_vector = L
        cam.north_vector = [0, 0, 1]
        cam.position = (1400, 1300, 1300)

        #sc.show(sigma_clip=4)

        sc.save(savefig_kw['name'])

        plots.append(sc)

    if slices:
        skw = deepcopy(sphere_kwargs)
        skw['circle_args']['color'] = (0.8, 0, 0)
        if num_doms_used != num_doms_in_detector:
            center = np.mean(doms_used, axis=0)
        else:
            center = (0, 0, 0)
            #center = det_string_depth_xyz[35, 47]
            #cut_plane_strings = [1 - s for s in [6, 12, 27, 36, 45, 54, 62, 69, 75]]
            #normal =
            #north_vector = (0, 0, 1)

        for normal in ['x', 'y', 'z']:
            #if normal == 'x':
            #    plt.
            slc = yt.SlicePlot(ds,
                               normal=normal,
                               fields='density',
                               center=center)
            slc.set_cmap('density', 'octarine')
            #slc.set_log('density', False)

            for depth_xyz in det_string_depth_xyz:
                for xyz in depth_xyz:
                    slc.annotate_sphere(xyz, **skw)

            if num_doms_used != num_doms_in_detector:
                kw = deepcopy(skw)
                kw['radius'] = (15 * DOM_RADIUS_M, 'cm')
                kw['circle_args']['alpha'] = 1
                for depth_xyz in doms_used:
                    slc.annotate_sphere(depth_xyz, **kw)

            nskip = 10
            kw = dict(factor=nskip, scale=1e3)
            if normal == 'x':
                slc.annotate_quiver('velocity_y', 'velocity_z', **kw)
            elif normal == 'y':
                slc.annotate_quiver('velocity_z', 'velocity_x', **kw)
            elif normal == 'z':
                slc.annotate_quiver('velocity_x', 'velocity_y', **kw)

            #slc.annotate_grids(cmap=None)
            slc.save(**savefig_kw)
            plots.append(slc)

    return ds, plots
Example #30
def vol_render_density(outfile, ds):
    """Volume render the density given a yt dataset."""

    import numpy as np
    import yt
    import matplotlib
    matplotlib.use('agg')
    from yt.visualization.volume_rendering.api import Scene, VolumeSource
    import matplotlib.pyplot as plt

    ds.periodicity = (True, True, True)

    field = ('boxlib', 'density')
    ds._get_field_info(field).take_log = True

    sc = Scene()

    # Add a volume: select a sphere

    vol = VolumeSource(ds, field=field)
    vol.use_ghost_zones = True

    sc.add_source(vol)

    # Transfer function

    vals = [-1, 0, 1, 2, 3, 4, 5, 6, 7]
    sigma = 0.1

    tf = yt.ColorTransferFunction((min(vals), max(vals)))

    tf.clear()

    cm = "spectral"

    for v in vals:
        if v < 3:
            alpha = 0.1
        else:
            alpha = 0.5
        tf.sample_colormap(v, sigma**2, colormap=cm, alpha=alpha)

    sc.get_source(0).transfer_function = tf

    cam = sc.add_camera(ds, lens_type="perspective")
    cam.resolution = (1920, 1080)

    center = 0.5 * (ds.domain_left_edge + ds.domain_right_edge)
    width = ds.domain_width

    # Set the camera so that we're looking down on the xy plane from a 45
    # degree angle. We reverse the y-coordinate since yt seems to use the
    # opposite orientation convention to us (where the primary should be
    # on the left along the x-axis). We'll scale the camera position based
    # on a zoom factor proportional to the width of the domain.

    zoom_factor = 0.75

    cam_position = np.array([
        center[0], center[1] - zoom_factor * width[1],
        center[2] + zoom_factor * width[2]
    ])

    cam.position = zoom_factor * ds.arr(cam_position, 'cm')

    # Set the normal vector so that we look toward the center.

    normal = (center - cam.position)
    normal /= np.sqrt(normal.dot(normal))

    cam.switch_orientation(normal_vector=normal, north_vector=[0.0, 0.0, 1.0])
    cam.set_width(width)

    # Render the image.

    sc.render()

    # Save the image without annotation.

    sc.save(outfile, sigma_clip=6.0)

    # Save the image with a colorbar.

    sc.save_annotated(outfile.replace(".png", "_colorbar.png"), sigma_clip=6.0)

    # Save the image with a colorbar and the current time.

    sc.save_annotated(outfile.replace(".png", "_colorbar_time.png"),
                      sigma_clip=6.0,
                      text_annotate=[[
                          (0.05, 0.925),
                          "t = {:.2f} s".format(float(ds.current_time.d)),
                          dict(horizontalalignment="left", fontsize="20")
                      ]])