Example #1
def get_data(start,stop):
     
     #n = np.exp(np.squeeze(collect("n",tind=[start,stop],path=path,info=False)))
     n = (np.squeeze(collect("n",tind=[start,stop],path=path,info=False)))
     #phi = (np.squeeze(collect("phi",tind=[start,stop],path=path,info=False)))
     n_mmap = np.memmap(nfile,dtype=n.dtype.name,mode='w+',shape=n.shape)
     n_mmap[:] = n[:]
     
     print('n_mmap.shape :', n_mmap.shape, n.shape)
     del n
     gc.collect()
     u = np.squeeze(collect("u",tind=[start,stop],path=path,info=False))
     u_mmap = np.memmap(ufile,dtype=u.dtype.name,mode='w+',shape=u.shape)
     u_mmap[:] = u[:]
     del u

     gc.collect()
     fft_u = np.fft.rfft(u_mmap)
     power = fft_u.conj()*fft_u
     A_k = np.real(np.sqrt(power))
     
     del fft_u,power
     
     gc.collect()
     phi = np.squeeze(collect("phi",tind=[start,stop],path=path,info=False))
     phi_mmap = np.memmap(phifile,dtype=phi.dtype.name,mode='w+',shape=phi.shape)
     phi_mmap[:] = phi[:]
     
     del phi


     gc.collect()
     
     return n_mmap,u_mmap,A_k,phi_mmap
Example #2
def draw(name,zshift_tol = 0.25, path = None):
    # Set the work_dir
    if path is None:
        work_dir = os.getcwd()
    else:
        os.chdir(path)
        work_dir = os.getcwd()

    grid_file = visual.get("BOUT.inp","grid") # Import grid file

    eigs = collect("t_array") # Import Eigen values
    var = collect(name) # Import variable
    nx,ny,nz = visual.dimd(name) # Set the shape of the variable
    #Import the R,Z values from the grid file
    r = visual.nc_var(grid_file,'Rxy') # R
    z = visual.nc_var(grid_file,'Zxy') # Z
    zshift = visual.nc_var(grid_file, 'zShift') # zShift
    
    z_tol = visual.z_shift_tol(nx,ny,zshift,zshift_tol) # set the z_tolerance value
    
    #Interpolate r, z, zshift values

    r2, ny2 = visual.zshift_interp2d(nx,ny,zshift,z_tol,r)
    z2, ny2 = visual.zshift_interp2d(nx,ny,zshift,z_tol,z)
    zshift2, ny2 = visual.zshift_interp2d(nx,ny,zshift,z_tol,zshift)
    
    pts2 = visual.elm(r2,z2,zshift2,nx,ny2,nz) # Write the vtk points
    
    # Plot eigen values and write the associated vtk file
    plot_eigenvals(eigs, nx, ny, nz ,zshift, z_tol, ny2, name, pts2, var)
Example #3
    def run(self):
        data1 = collect(self.var1, tind = self.time, path=self.path)
        data2 = collect(self.var2, tind = self.time, path=self.path)
        plot = mlab.figure()
        field1 = mlab.pipeline.scalar_field(data1)
        field2 = mlab.pipeline.scalar_field(data2)
        mlab.pipeline.volume(field1, figure=plot)

        # Add image plane slices through the second field (assumed from context)
        for n in range(self.no_slices):
            mlab.pipeline.image_plane_widget(field2, figure=plot, plane_orientation='y_axes',
                                             slice_index=int((n + 3./4.)*(np.shape(data2)[1]/self.no_slices)))
Example #4
def perform_MMS_test(paths, extension='.pdf', show_plot=False):
    """Collects the data members belonging to a convergence plot"""

    # Make a variable to store the errors and the spacing
    data = {'error_2': [], 'error_inf': [], 'spacing': []}

    # Loop over the runs in order to collect
    for path in paths:
        # Collect n_solution - n_numerical
        error_array = collect('E_n', path=path, info=False,\
                              xguards = False, yguards = False)
        # Pick the last time point
        error_array = error_array[-1]

        # The error in the 2-norm and infinity-norm
        data['error_2'].append(np.sqrt(np.mean(error_array**2.0)))
        data['error_inf'].append(np.max(np.abs(error_array)))

        # Collect the spacings
        dx_spacing = collect("dx", path=path, info=False,\
                             xguards = False, yguards = False)
        dy_spacing = collect("dy", path=path, info=False,\
                             xguards = False, yguards = False)
        dz_spacing = collect("dz", path=path, info=False,\
                             xguards = False, yguards = False)
        # We are interested in the max of the spacing
        dx_spacing = np.max(dx_spacing)
        dy_spacing = np.max(dy_spacing)
        dz_spacing = np.max(dz_spacing)

        # Store the spacing in the data
        data['spacing'].append(np.max([dx_spacing, dy_spacing, dz_spacing]))

    # Sort the data
    data = sort_data(data)

    # Find the order of convergence in the 2 norm and infinity norm
    order_2, order_inf = get_order(data)

    # Get the root name of the path (used for saving files)
    root_folder = paths[0].split('/')[0] + '/'

    # Get the name of the plot based on the first folder name
    name = paths[0].split('/')
    # Remove the root folder and put 'MMS-' in front
    name = 'MMS-' + '_'.join(name[1:])

    # Print the convergence rate
    print_convergence_rate(data, order_2, order_inf, root_folder, name)

    # Plot
    # We want to show the lines of the last orders, so we send in
    # order_2[-1] and order_inf[-1]
    do_plot(data, order_2[-1], order_inf[-1],\
            root_folder, name, extension, show_plot)
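The helper functions sort_data, get_order, print_convergence_rate and do_plot are not shown on this page. As a rough sketch only (an assumption about what get_order might compute, not the actual helper), the observed order between consecutive runs follows from error ~ C*spacing**p:

import numpy as np

def estimate_order(spacing, error):
    # Observed convergence order p between consecutive runs,
    # assuming error ~ C * spacing**p (illustration only)
    h = np.asarray(spacing, dtype=float)
    e = np.asarray(error, dtype=float)
    return np.log(e[1:] / e[:-1]) / np.log(h[1:] / h[:-1])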
Example #6
def total_sources(path, tind=-1):
    """Return total sources integrated over the domain in SI units:
    - Volume [m^3]
    - Particles [s^-1]
    - Power into electrons and ions [Watts]

    Inputs
    ------
    path    The path to the data files
    tind    Time index
    """

    # Normalisation
    qe = 1.602e-19  # Electron charge [C]
    Nnorm = collect("Nnorm", path=path)  # Reference density [m^-3]
    Tnorm = collect("Tnorm", path=path)  # Reference temperature [eV]
    rho_s0 = collect("rho_s0", path=path)  # Reference length [m]
    wci = collect("Omega_ci", path=path)  # Reference frequency [s^-1]

    # Metrics
    J = collect("J", path=path)
    dx = collect("dx", path=path)
    dy = collect("dy", path=path)
    dz = collect("dz", path=path)

    dV = J * dx * dy * dz  # Normalised cell volume

    dV_si = dV * rho_s0 ** 3  # Cell volume [m^3] as a function of [x,y]

    result = {}

    # Read and convert units for each source
    # Note the 3/2 factor multiplying the pressure source to convert to internal energy
    for source_name, norm in [
        ("NeSource", Nnorm * wci),
        ("PeSource", (3.0 / 2) * qe * Nnorm * Tnorm * wci),
        ("PiSource", (3.0 / 2) * qe * Nnorm * Tnorm * wci),
    ]:
        # Read source, select single time, exclude X guard cells, convert to SI units
        source = norm * collect(source_name, path=path, tind=tind)[-1, 2:-2, :, :]

        # Get number of Z points
        nz = source.shape[-1]

        # Multiply by cell volume [m^3]
        for z in range(nz):
            source[:, :, z] *= dV_si[2:-2, :]

        # Sum over all cells
        result[source_name] = np.sum(source)

    result["volume"] = np.sum(dV_si[2:-2, :]) * nz  # Exclude guard cells
    return result
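A minimal usage sketch (the "data" path below is an assumption, not taken from the example above):

sources = total_sources("data")
for name, value in sources.items():
    print(name, value)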
Example #7
def safeCollect(*args, **kwargs):
    #{{{docstring
    """
    Wrapper around collect which sets the data immutable

    Parameters
    ----------
    *args : positional arguments
        Positional arguments to collect
    **kwargs : keyword arguments
        Keyword arguments to collect

    Return
    ------
    data : array
        The array is not writeable
    """
    #}}}

    try:
        data = collect(*args, **kwargs)
    except Exception as e:
        print("\nFailed to collect {}\n".format(kwargs["path"]))
        raise e

    # write = False prevents writing
    data.setflags(write=False)

    return data
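A short usage sketch (the variable name "Ne" and the path are assumptions). Because the write flag is cleared, modifying the returned array raises a ValueError:

ne = safeCollect("Ne", path="data")
try:
    ne[0] = 0.0
except ValueError:
    print("ne is read-only")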
Example #8
def analyse(path="capillary"):
    dx = collect("dx", path=path)[0, 0]
    dz = collect("dz", path=path)

    t_array = collect("t_array", path=path)

    vof = collect("vof", path=path)
    nt, nx, _, nz = vof.shape

    mid_x = (nx - 4.) / 2. + 1.5
    mid_z = nz / 2.0 - 0.5  # Index at the middle

    imz = int(nz / 4)

    result = zeros(nt)

    # Find where vof goes from 0 to 1 or 1 to 0
    for tind in range(nt):
        data = vof[tind, :, 0, imz]

        indl = 0
        indh = nx - 1

        if data[indl] < 0.5 and data[indh] > 0.5:
            # Find where vof transitions from 0 to 1
            data = 1.0 - data
        while indl != indh:
            mid = int((indl + indh) / 2.0)

            if (mid == indl) or (mid == indh):
                break

            if data[mid] > 0.5:
                indl = mid
            else:
                indh = mid

        if data[mid] < 0.5:
            mid -= 1
        mid += (0.5 - data[mid]) / (data[mid + 1] - data[mid])

        result[tind] = mid

    # convert to displacement
    return t_array, (mid_x - result) * dx  # meters
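An illustrative way to plot the returned displacement trace (the matplotlib calls below are an assumption, not part of the example):

import matplotlib.pyplot as plt

t, displacement = analyse("capillary")
plt.plot(t, displacement)
plt.xlabel("Time")
plt.ylabel("Interface displacement [m]")
plt.show()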
Example #9
def get_data(start,stop):
     
     #n = np.exp(np.squeeze(collect("n",tind=[start,stop],path=path,info=False)))
     n = (np.squeeze(collect("n",tind=[start,stop],path=path,info=False)))
     #phi = (np.squeeze(collect("phi",tind=[start,stop],path=path,info=False)))
     n_mmap = np.memmap(nfile,dtype=n.dtype.name,mode='w+',shape=n.shape)
     n_mmap[:] = n[:]
     
     print('n_mmap.shape :', n_mmap.shape, n.shape)
     del n
     gc.collect()
     u = np.squeeze(collect("u",tind=[start,stop],path=path,info=False))
     u_mmap = np.memmap(ufile,dtype=u.dtype.name,mode='w+',shape=u.shape)
     u_mmap[:] = u[:]
     del u

     gc.collect()
     #A_k = np.fft.rfft(u_mmap,axis=2)
     
     #power = fft_u.conj()*fft_u
     #A_k = np.real(np.sqrt(power))
     
     #del fft_u,power
     
     gc.collect()
     phi = np.squeeze(collect("phi",tind=[start,stop],path=path,info=False))
     phi_mmap = np.memmap(phifile,dtype=phi.dtype.name,mode='w+',shape=phi.shape)
     phi_mmap[:] = phi[:]
     
     del phi


     gc.collect()
     
     T = (np.squeeze(collect("Te",tind=[start,stop],path=path,info=False)))
     T_mmap = np.memmap(Tefile,dtype=T.dtype.name,mode='w+',shape=T.shape)
     T_mmap[:] = T[:]
    
     del T
     gc.collect()

     n_k = np.fft.rfft(n_mmap,axis=2)
     u_k = np.fft.rfft(u_mmap,axis=2)
     T_k = np.fft.rfft(T_mmap,axis=2)
     return n_mmap,u_mmap,(n_k,u_k,T_k),phi_mmap,T_mmap
Example #10
    def __getitem__(self, name):
        """
        Reads a variable using collect.
        
        """

        # Collect the data from the repository
        data = collect(name, path=self._path, prefix=self._prefix)
        return data
Example #11
    def __getitem__(self, name):
        """
        Reads a variable using collect.
        
        """

        # Collect the data from the repository
        data = collect(name, path=self._path, prefix=self._prefix)
        return data
Example #12
    def test_multiple_files_along_x(self):
        var = 'n'
        expected = collect(var,
                           path='./tests/data/dump_files/',
                           prefix='BOUT.dmp',
                           xguards=False)

        ds, metadata = _auto_open_mfboutdataset(
            './tests/data/dump_files/BOUT.dmp.*.nc')
        actual = ds[var].values

        assert expected.shape == actual.shape
        npt.assert_equal(actual, expected)
Example #13
    def test_single_file(self):
        var = 'n'
        expected = collect(var,
                           path='./tests/data/dump_files/single',
                           prefix='equilibrium',
                           xguards=False)

        ds, metadata = _auto_open_mfboutdataset(
            './tests/data/dump_files/single/equilibrium.nc')
        print(ds)
        actual = ds[var].values

        assert expected.shape == actual.shape
        npt.assert_equal(actual, expected)
Example #14
def update():
    mu_list = ['1e-3','1e-2','1e-1']
    NP = 264
    

    for mu in mu_list:
        data_dir = '/scratch/01523/meyerson/BOUT_sims/ConvectSOL'
        label=str(NP)+'_mu='+mu+'_trash'
        sim_key = 'convect_sol_XZ_'+mu+'_trash'

        path=data_dir+'/data_'+sim_key

        print(path)
        try:
            n = np.squeeze(collect("n",path=path,tind=[0,299]))
        except:
            n = np.squeeze(collect("n",path=path,tind=[0,200]))

        nt,nx,ny = n.shape

        dx = np.squeeze(collect("dx",path=path,xind=[0,0]))
        dy = np.squeeze(collect("dz",path=path,xind=[0,0]))
        yO = -.5*(dy*ny)
        xO = -.17949 *(dx*nx)


        meta={'dx':dx,'dy':dy,'y0':yO,'x0':xO,'dt':1e-1}
        nt,nx,ny = n.shape
        
   
        z = CM_mass(n,meta=meta,label=label)
    
        f_db = open('TACC_'+label+'_db','wb')  # binary mode for pickle
        pickle.dump(z,f_db)
        f_db.close()
        sim_data.append(z)
Example #15
def get_data(path,field="n",fun=None,start=0,stop=50,*args):
     
     data = np.squeeze(collect(field,tind=[start,stop],path=path,info=False))
     #print data.shape,start,stop,data.dtype.name
     #data_mmap = np.memmap(data,dtype=data.dtype.name,mode='w+',shape=data.shape)
     #data_mmap[:] = data[:]
     
     #print 'n_mmap.shape :',n_mmap.shape,n.shape
     #del data
     gc.collect()
     
     if fun is None:
         return data
     else:
         return fun(data)
Example #16
def show_the_data(path, t=None, x=None, y=None, z=None):
    """Function which plots the data.

    Input
    path     -   the path of a run
    t        -   the desired t slice of showdata
    x        -   the desired x slice of showdata
    y        -   the desired y slice of showdata
    z        -   the desired z slice of showdata
    """

    print("Showing data from " + path)
    n = collect('n', xguards=False, yguards=False, path=path, info=False)

    # Show the data
    showdata(n[t,x,y,z])
Example #18
def momentum_loss_fraction(path, upstream_index=0, tind=-1):
    """
    The fraction of momentum lost between an upstream
    index and the end of the domain
    
    f_mom = \int F dl / p_u

    where F is the friction, dl the cell length, and 
    p_u the total (static+dynamic) pressure at upstream_index

    Inputs
    ------

    path  The directory containing the data
    upstream_index   The index to use as the upstream 
    tind   The time index. -1 is the final time
    
    Return
    ------
    
    Fraction of momentum lost
    
    """

    # Read the upstream static pressure
    p = collect("P", path=path, yind=upstream_index, tind=tind)
    p_u_static = p.item()  # Convert to scalar

    # Calculate the upstream dynamic pressure
    n = collect("Ne", path=path, yind=upstream_index, tind=tind)
    nvi = collect("NVi", path=path, yind=upstream_index, tind=tind)
    p_u_dynamic = (nvi * nvi / n).item()

    # Total pressure p_u
    p_u = p_u_static + p_u_dynamic

    # Read the friction
    try:
        F = collect("F", path=path, yind=[upstream_index, -1],
                    tind=tind).flatten()
    except:
        print("No friction force 'F' found")
        F = 0.0

    # Need to integrate along length, so get cell spacing
    dy = collect("dy", path=path, yind=[upstream_index, -1]).flatten()
    g_22 = collect("g_22", path=path, yind=[upstream_index, -1]).flatten()

    dl = dy * np.sqrt(g_22)  # Cell length

    # F is already integrated over each cell, so just sum
    # contribution from each cell
    return np.sum(F * dl) / p_u
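A minimal usage sketch (the path is an assumption):

f_mom = momentum_loss_fraction("data", upstream_index=0, tind=-1)
print("Fraction of momentum lost:", f_mom)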
Example #19
def save(path='/home/cryosphere/BOUT/examples/Q3/data_short'): 
    print('in post_bout.save')
    boutpath = path

    print(path)
    print(sys.path)
  
    meta = metadata(path=path)
   
    print('ok got meta')
    output = OrderedDict()
    data = OrderedDict()

    con = sql.connect('test.db')
   
    all_modes = []
    print(meta['evolved']['v'])  # evolved fields for the given run

    for i,active in enumerate(meta['evolved']['v']): #loop over the fields
        print(path, active)
        data[active] = boutdata.collect(active,path=path)
        modes,ave = basic_info(data[active],meta)
Example #20
def collectSteadyN(steadyStatePath, yInd):
    #{{{docstring
    """
    Collects n in the steady state.

    Parameters
    ----------
    steadyStatePath : str
        Path to collect from
    yInd : int
        Index for the parallel coordinate.

    Returns
    -------
    n : array-4d
        The density at the steady state
    """
    #}}}

    # Check last t index
    with DataFile(os.path.join(steadyStatePath, "BOUT.dmp.0.nc")) as f:
        tLast = len(f.read("t_array")) - 1

    # In the steady state, the max gradient in "n" is the same
    # throughout in the domain, so we use yInd=0, zInd=0 in the
    # collect
    lnN = collect("lnN",\
                  path=steadyStatePath   ,\
                  xguards=False          ,\
                  yguards=False          ,\
                  yind   = [yInd, yInd]  ,\
                  zind   = [0   , 0]     ,\
                  tind   = [tLast, tLast],\
                  info=False)
    n = calcN(lnN, normalized=True)

    return n
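A usage sketch (the path and parallel index below are assumptions):

n = collectSteadyN("steadyState", 16)
print(n.shape)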
Example #21
import numpy as np
from sys import argv
from math import sqrt, log10, log, pi
from matplotlib import pyplot
from boutdata import collect
from past.utils import old_div

gamma = 3.
if len(argv)>1:
  data_path = str(argv[1])
else:
  data_path = "data"

electron_mass = 9.10938291e-31
ion_mass =  3.34358348e-27

# Collect the data
Te = collect("T_electron", path=data_path, info=True, yguards=True)
Ti = collect("T_ion", path=data_path, info=True, yguards=True)
n = collect("n_ion", path=data_path, info=True, yguards=True)
V = collect("Vpar_ion", path=data_path, info=True, yguards=True)
q = collect("heat_flux", path=data_path, info=True, yguards=True)

q_electron_left = []
q_electron_right = []
right_index = len(Te[0,2,:,0])-4

for i in range(len(Te[:,2,0,0])):
  Te_left = old_div((Te[i,2,2,0]+Te[i,2,1,0]),2.)
  Ti_left = old_div((Ti[i,2,2,0]+Ti[i,2,1,0]),2.)
  n_left = old_div((n[i,2,2,0]+n[i,2,1,0]),2.)
  Te_right = old_div((Te[i,2,right_index,0]+Te[i,2,right_index+1,0]),2)
  Ti_right = old_div((Ti[i,2,right_index,0]+Ti[i,2,right_index+1,0]),2)
Example #22
import sys
import matplotlib
matplotlib.rcParams.update({'font.size': 18})
import matplotlib.pyplot as plt
from numpy import zeros
from boutdata import collect

if len(sys.argv) != 2:
    # Print usage information
    print("Usage: {0} path\n e.g. {0} data".format(sys.argv[0]))
    sys.exit(1)

path = sys.argv[1]
tind = -1

# Evolving variables, remove extra guard cells so just one each side
p = collect("P", path=path, tind=tind, yguards=True)[-1, 0, 1:-1, 0]

try:
    pn = collect("Pn", path=path, tind=tind, yguards=True)[-1, 0, 1:-1, 0]
except:
    pn = zeros(p.shape)

nvi = collect("NVi", path=path, tind=tind, yguards=True)[-1, 0, 1:-1, 0]
ne = collect("Ne", path=path, tind=tind, yguards=True)[-1, 0, 1:-1, 0]
try:
    nvn = collect("NVn", path=path, tind=tind, yguards=True)[-1, 0, 1:-1, 0]
except:
    nvn = zeros(p.shape)
try:
    nn = collect("Nn", path=path, tind=tind, yguards=True)[-1, 0, 1:-1, 0]
except:
Example #23
def View3D(g,path=None, gb=None):
  ##############################################################################
  # Resolution
  
  n=51
  
  #compute Bxy
  [Br,Bz,x,y,q]=View2D(g,option=1)
  
  
  rd=g.r.max()+.5
  zd=g.z.max()+.5
  ##############################################################################
  # The grid of points on which we want to evaluate the field
  X, Y, Z = np.mgrid[-rd:rd:n*1j, -rd:rd:n*1j, -zd:zd:n*1j]
  ## Avoid rounding issues :
  #f = 1e4  # this gives the precision we are interested by :
  #X = np.round(X * f) / f
  #Y = np.round(Y * f) / f
  #Z = np.round(Z * f) / f
  
  r = np.c_[X.ravel(), Y.ravel(), Z.ravel()]
  
  ##############################################################################
  # Calculate field
  # First initialize a container matrix for the field vector :
  B = np.empty_like(r)
  
  
  #Compute Toroidal field
  # fpol is given between simagx (psi on the axis) and sibdry (
  # psi on limiter or separatrix). So the toroidal field (fpol/R) and the q profile are within these boundaries 
  # For each r,z we have psi thus we get fpol if (r,z) is within the boundary (limiter or separatrix) and fpol=fpol(outer_boundary) for outside
  
  #The range of psi is g.psi.max(), g.psi.min() but we have f(psi) up to the limit. Thus we use a new extended variable padded up to max psi
  # set points between psi_limit and psi_max
  
  add_psi=np.linspace(g.sibdry,g.psi.max(),10)
  
  # define the x (psi) array
  xf=np.arange(float(g.qpsi.size))*(g.sibdry-g.simagx)/float(g.qpsi.size-1) + g.simagx
  
  # pad the extra values excluding the 1st value
  
  xf=np.concatenate((xf, add_psi[1::]), axis=0)
  
  # pad fpol with corresponding points
  
  fp=np.lib.pad(g.fpol, (0,9), 'edge')
  
  # create interpolating function 
  
  f = interpolate.interp1d(xf, fp)
  
  #calculate Toroidal field
  
  Btrz = old_div(f(g.psi), g.r)
  
  
  rmin=g.r[:,0].min()
  rmax=g.r[:,0].max()
  zmin=g.z[0,:].min()
  zmax=g.z[0,:].max()
  
  
  B1p,B2p,B3p,B1t,B2t,B3t = magnetic_field(g,X,Y,Z,rmin,rmax,zmin,zmax, Br,Bz,Btrz)
  
  bpnorm = np.sqrt(B1p**2 + B2p**2 + B3p**2)
  btnorm = np.sqrt(B1t**2 + B2t**2 + B3t**2)
  
  BBx=B1p+B1t
  BBy=B2p+B2t
  BBz=B3p+B3t
  btotal = np.sqrt(BBx**2 + BBy**2 + BBz**2)
  
  Psi = psi_field(g,X,Y,Z,rmin,rmax,zmin,zmax)
  
  ##############################################################################
  # Visualization
  
  # We threshold the data ourselves, as the threshold filter produce a
  # data structure inefficient with IsoSurface
  #bmax = bnorm.max()
  #
  #B1[B > bmax] = 0
  #B2[B > bmax] = 0
  #B3[B > bmax] = 0
  #bnorm[bnorm > bmax] = bmax
  
  mlab.figure(1, size=(1080,1080))#, bgcolor=(1, 1, 1), fgcolor=(0.5, 0.5, 0.5))
  
  mlab.clf()
  
  fieldp = mlab.pipeline.vector_field(X, Y, Z, B1p, B2p, B3p,
                                    scalars=bpnorm, name='Bp field')
                                    
  fieldt = mlab.pipeline.vector_field(X, Y, Z, B1t, B2t, B3t,
                                    scalars=btnorm, name='Bt field')
                                    
  field = mlab.pipeline.vector_field(X, Y, Z, BBx, BBy, BBz,
                                    scalars=btotal, name='B field')
                                    
                                    
                                    
  field2 = mlab.pipeline.scalar_field(X, Y, Z, Psi, name='Psi field')
  
  #vectors = mlab.pipeline.vectors(field,
  #                      scale_factor=1,#(X[1, 0, 0] - X[0, 0, 0]),
  #                      )
                        
  #vcp1 = mlab.pipeline.vector_cut_plane(fieldp,
  #                                        scale_factor=1,
  #                                        colormap='jet',
  #                                        plane_orientation='y_axes')
  ##                                        
  #vcp2 = mlab.pipeline.vector_cut_plane(fieldt,
  #                                        scale_factor=1,
  #                                        colormap='jet',
  #                                        plane_orientation='x_axes')
                                       
                                          
  # Mask random points, to have a lighter visualization.
  #vectors.glyph.mask_input_points = True
  #vectors.glyph.mask_points.on_ratio = 6
  
  #vcp = mlab.pipeline.vector_cut_plane(field1)
  #vcp.glyph.glyph.scale_factor=5*(X[1, 0, 0] - X[0, 0, 0])
  # For prettier picture:
  #vcp1.implicit_plane.widget.enabled = False
  #vcp2.implicit_plane.widget.enabled = False
  
  iso = mlab.pipeline.iso_surface(field2,
                                  contours=[Psi.min()+.01],
                                  opacity=0.4,
                                  colormap='bone')
  
  for i in range(q.size):      
      iso.contour.contours[i+1:i+2]=[q[i]]
                                                                                                                                                                                   
  iso.compute_normals = True
  # 
                                 
  #mlab.pipeline.image_plane_widget(field2,
  #                            plane_orientation='x_axes',
  #                            #slice_index=10,
  #                            extent=[-rd, rd, -rd, rd, -zd,zd]
  #                        )
  #mlab.pipeline.image_plane_widget(field2,
  #                            plane_orientation='y_axes',
  #                           # slice_index=10,
  #                            extent=[-rd, rd, -rd,rd, -zd,zd]
  #                        )
  
  
                          
  #scp = mlab.pipeline.scalar_cut_plane(field2,
  #                                        colormap='jet',
  #                                        plane_orientation='x_axes')
  # For prettier picture and with 2D streamlines:
  #scp.implicit_plane.widget.enabled = False
  #scp.enable_contours = True
  #scp.contour.number_of_contours = 20
                                                                                
  #   
  
  # Magnetic Axis
  
  s=mlab.pipeline.streamline(field)
  s.streamline_type = 'line'
  s.seed.widget = s.seed.widget_list[3]
  s.seed.widget.position=[g.rmagx,0.,g.zmagx]
  s.seed.widget.enabled = False
  
  
  #  q=i surfaces     
  
  for i in range(np.shape(x)[0]):
                                                             
      s=mlab.pipeline.streamline(field)
      s.streamline_type = 'line'
  ##s.seed.widget = s.seed.widget_list[0]
  ##s.seed.widget.center = 0.0, 0.0, 0.0
  ##s.seed.widget.radius = 1.725
  ##s.seed.widget.phi_resolution = 16
  ##s.seed.widget.handle_direction =[ 1.,  0.,  0.]
  ##s.seed.widget.enabled = False
  ##s.seed.widget.enabled = True
  ##s.seed.widget.enabled = False
  #
      if x[i].size>1 :
          s.seed.widget = s.seed.widget_list[3]
          s.seed.widget.position=[x[i][0],0.,y[i][0]]
          s.seed.widget.enabled = False
  
                         
  # A trick to make transparency look better: cull the front face
  iso.actor.property.frontface_culling = True
  
  #mlab.view(39, 74, 0.59, [.008, .0007, -.005])
  out=mlab.outline(extent=[-rd, rd, -rd, rd, -zd, zd], line_width=.5 )
  out.outline_mode = 'cornered'
  out.outline_filter.corner_factor = 0.0897222
  
  
  w = mlab.gcf()
  w.scene.camera.position = [13.296429046581462, 13.296429046581462, 12.979811259697154]
  w.scene.camera.focal_point = [0.0, 0.0, -0.31661778688430786]
  w.scene.camera.view_angle = 30.0
  w.scene.camera.view_up = [0.0, 0.0, 1.0]
  w.scene.camera.clipping_range = [13.220595435695394, 35.020427055647517]
  w.scene.camera.compute_view_plane_normal()
  w.scene.render()
  w.scene.show_axes = True
  
  mlab.show()
  
  if path is not None:
  #BOUT data
  #path='../Aiba/'
  #
  #gb = file_import(path+'aiba.bout.grd.nc')
  #gb = file_import("../cbm18_8_y064_x516_090309.nc")
  #gb = file_import("cbm18_dens8.grid_nx68ny64.nc")
  #gb = file_import("/home/ben/run4/reduced_y064_x256.nc")
      
    data = collect('P', path=path)
    data = data[50,:,:,:]
  #data0=collect("P0", path=path)
  #data=data+data0[:,:,None]
  
    s = np.shape(data)
    nz = s[2]
  
  
    sgrid = create_grid(gb, data, 1)
      
  # OVERPLOT the GRID    
  #mlab.pipeline.add_dataset(sgrid)
  #gr=mlab.pipeline.grid_plane(sgrid)
  #gr.grid_plane.axis='x'
  
  
  ## pressure scalar cut plane from bout 
    scpb = mlab.pipeline.scalar_cut_plane(sgrid,
                                          colormap='jet',
                                          plane_orientation='x_axes')
  
    scpb.implicit_plane.widget.enabled = False
    scpb.enable_contours = True
    scpb.contour.filled_contours=True
  #
    scpb.contour.number_of_contours = 20
  #
  #
  #loc=sgrid.points
  #p=sgrid.point_data.scalars
  
  # compute pressure from scatter points interpolation
  #pint=interpolate.griddata(loc, p, (X, Y, Z), method='linear')
  #dpint=np.ma.masked_array(pint,np.isnan(pint)).filled(0.)
  #
  #p2 = mlab.pipeline.scalar_field(X, Y, Z, dpint, name='P field')
  #
  #scp2 = mlab.pipeline.scalar_cut_plane(p2,
  #                                        colormap='jet',
  #                                        plane_orientation='y_axes')
  #
  #scp2.implicit_plane.widget.enabled = False
  #scp2.enable_contours = True
  #scp2.contour.filled_contours=True
  #scp2.contour.number_of_contours = 20
  #scp2.contour.minimum_contour=.001
  
  
  
  # CHECK grid orientation
  #fieldr = mlab.pipeline.vector_field(X, Y, Z, -BBx, BBy, BBz,
  #                                  scalars=btotal, name='B field')
  #
  #sg=mlab.pipeline.streamline(fieldr)
  #sg.streamline_type = 'tube'
  #sg.seed.widget = sg.seed.widget_list[3]
  #sg.seed.widget.position=loc[0]
  #sg.seed.widget.enabled = False
  
  
  
  #OUTPUT grid
  
  #ww = tvtk.XMLStructuredGridWriter(input=sgrid, file_name='sgrid.vts')
  #ww.write()
      
  return
Example #24
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from past.utils import old_div
from boutdata import collect
from boututils import file_import, moment_xyzt

rcParams['font.size'] = 20.
rcParams['legend.fontsize'] = 'small'
rcParams['lines.linewidth'] = 2

if not os.path.exists('image'):
   os.makedirs('image')

g = file_import('../cbm18_dens8.grid_nx68ny64.nc')
psi = old_div((g['psixy'][:, 32] - g['psi_axis']), (g['psi_bndry'] - g['psi_axis']))

path = './data'

plt.figure()

#p0 = transpose(readsav(os.path.join(path, 'p0.idl.dat'))['p0'])
p0=collect('P0', path=path)

#dcp = transpose(readsav(os.path.join(path, 'dcp.idl.dat'))['dcp'])
p=collect('P', path=path)
res = moment_xyzt(p,'RMS','DC')
rmsp = res.rms
dcp = res.dc
nt = dcp.shape[2]

plt.plot(psi, p0[:, 32], 'k--', label='t=0')
plt.plot(psi, p0[:, 32] + dcp[old_div(nt, 4), :, 32], 'r-', label='t=' + str(old_div(nt, 4)))
plt.plot(psi, p0[:, 32] + dcp[old_div(nt, 2), :, 32], 'g-', label='t=' + str(old_div(nt, 2)))
plt.plot(psi, p0[:, 32] + dcp[old_div(3 * nt, 4), :, 32], 'b-', label='t=' + str(old_div(3 * nt, 4)))
plt.plot(psi, p0[:, 32] + dcp[-1, :, 32], 'c-', label='t=' + str(nt))

plt.legend()
Example #25
from builtins import range
from past.utils import old_div
import numpy as np
from boututils import file_import, surface_average, showdata
from boutdata import collect
from pylab import plot, show, annotate, xlabel, ylabel, figure, xlim, ylim, legend, gca
import os

 
path='./data'

gfile='../cbm18_dens8.grid_nx68ny64.nc'
  
g = file_import(gfile)
   
var=collect("P", path=path)   
   
sol=surface_average(var, g)
#sol=np.mean(var,axis=3)

p0av=collect("P0", path=path)

q=np.zeros(sol.shape)

for i in range(sol.shape[1]):
    q[:,i]=sol[:,i]+p0av[:,0]
    
    
psixy=g.get('psixy')
psi0=g.get('psi_axis')
psix=g.get('psi_bndry')
Example #26
    sm.plot(r,alpha, lw=2, label=label, color='blue')
    sm.fill_between(r,alpha+sigma, alpha-sigma, facecolor='yellow', alpha=0.5)
    sm.set_title(title)
    #sm.set_yscale('symlog')
    sm.legend(loc='upper left')
    sm.set_xlabel(x_label)
    formatter = ticker.ScalarFormatter()
    formatter.set_powerlimits((-2, 2))  #force scientific notation
    sm.yaxis.set_major_formatter(formatter)
#sm.set_ylabel('$\alpha$')
    sm.grid()
    fig.savefig(pp, format=format)
    plt.close(fig)


a = np.squeeze(collect("alpha",path=path))
mask = np.squeeze(collect("alpha_mask",path=path))
n = np.squeeze(collect("n",path=path))

a_smooth = np.squeeze(collect("alpha_smooth",path=path))
a_smooth = a_smooth[2:-2,:]
a_smoothpy = ndimage.gaussian_filter(a, 15)
nx,ny = a.shape

q0= 3.
aa = 45.
b = 55
nu = 2.0

x = b*(.8+.2*np.arange(nx)/nx)
dx = np.squeeze(collect("dx",path=path,xind=[0,0]))
Example #27
    def compute_profile(self):
        self.linlam = []
        self.lam = []
        self.t = []
        self.pdf_x = []
        self.pdf_y = []
        self.df_x = []
        self.df_y = []
        nrms = []
        nave = []
        nmax = []
        nmin = []
        coarse_x = []
        xchunk = 10
        nx = self.nx
        nt = self.nt
        path = self.path
        xpos = np.arange(xchunk) * nx / xchunk
        xpos = list(xpos)
        xpos.append(nx - 1)
        self.nave = []
        for x in np.array(xpos):
            #print 'x: ', x / nx, nt
            coarse_x.append(x)
            #print x, round(nt / 2)
            sys.stdout = mystdout = StringIO()
            data = np.squeeze(collect('n', xind=[int(x), int(x) + 5], tind=[nt // 2, nt - 2], path=path, info=False))
            if self.log_n:
                data = np.exp(data)
            sys.stdout = old_stdout
            dshape = data.shape
            temp = np.reshape(data, [dshape[0], np.prod(dshape) // dshape[0]])
            nrms.append(temp.std(axis=1))
            try:
                nave.append(data.mean(axis=1).mean(axis=1))
                nmax.append(data.max(axis=1).max(axis=1))
                nmin.append(data.min(axis=1).min(axis=1))
            except:
                nave.append(data.mean(axis=1))
                nmax.append(data.max(axis=1))
                nmin.append(data.min(axis=1))

        nave = np.array(nave)
        nrms = np.array(nrms)
        nmax = np.array(nmax)
        nmin = np.array(nmin)
        print('nave.shape: ', nave.shape, nmax.shape,
              len(coarse_x), len(np.arange(nt - nt // 2)))
        tarray = np.array(np.arange(nt - nt // 2 - 1))
        self.nave_net = np.transpose(nave)
        self.nrms = np.transpose(nrms)
        self.nmin = np.transpose(nmin)
        self.nmax = np.transpose(nmax)
        self.coarse_x = coarse_x
        self.moving_min = np.min(self.nmin - self.nave_net, axis=0)
        self.moving_max = np.max(self.nmax - self.nave_net, axis=0)
        self.moving_min_n = np.min((self.nmin - self.nave_net) / self.nrms, axis=0)
        self.moving_max_n = np.max((self.nmax - self.nave_net) / self.nrms, axis=0)
        nmin_net = []
        nmax_net = []
        nrms_net = []
        nave_net = []
        coarse_x = self.coarse_x
        xnew = np.arange(0, nx)
        for tt in tarray:
            f = interpolate.interp1d(coarse_x, nave[:, tt], kind='linear')
            nave_net.append(f(xnew))
            f = interpolate.interp1d(coarse_x, nrms[:, tt], kind='linear')
            nrms_net.append(f(xnew))
            f = interpolate.interp1d(coarse_x, nmin[:, tt], kind='linear')
            nmin_net.append(f(xnew))
            f = interpolate.interp1d(coarse_x, nmax[:, tt], kind='linear')
            nmax_net.append(f(xnew))

        nave_net = np.array(nave_net)
        nrms_net = np.array(nrms_net)
        nmin_net = np.array(nmin_net)
        nmax_net = np.array(nmax_net)
        moving_min_n = np.min((nmin_net - nave_net) / nrms_net, axis=0)
        moving_max_n = np.max((nmax_net - nave_net) / nrms_net, axis=0)
        moving_min = np.min(nmin_net - nave_net, axis=0)
        moving_max = np.max(nmax_net - nave_net, axis=0)
        return (moving_min_n,
         moving_max_n,
         moving_min,
         moving_max,
         nave_net,
         nrms_net,
         nmin_net,
         nmax_net)
Example #28
from __future__ import print_function
######
# Computes the rms of a variable and prints the growth rate value at the last timestep
###### 

import numpy as np
from boutdata import collect
from boututils import moment_xyzt



path='./data/'


p=collect('P',path=path)
rmsp_f=moment_xyzt(p[:,34:35,32:33,:], 'RMS').rms


print(np.gradient(np.log(rmsp_f[:,0,0]))[-1])

Example #29
    g.grid_plane.axis = 'x'
    mayavi.add_module(g)


if __name__ == '__main__':
    from boutdata import collect
    from boututils import file_import

    path = "/media/449db594-b2fe-4171-9e79-2d9b76ac69b6/runs/data_33/"
    #path="/home/ben/run4"

    #g = file_import("../cbm18_dens8.grid_nx68ny64.nc")
    g = file_import("data/cbm18_8_y064_x516_090309.nc")
    #g = file_import("/home/ben/run4/reduced_y064_x256.nc")

    data = collect("P", tind=50, path=path)
    data = data[0, :, :, :]
    s = np.shape(data)
    nz = s[2]

    bkgd = collect("P0", path=path)
    for z in range(nz):
        data[:, :, z] += bkgd

    # Create a structured grid
    sgrid = create_grid(g, data, 10)

    w = tvtk.XMLStructuredGridWriter(input=sgrid, file_name='sgrid.vts')
    w.write()

    # View the structured grid
Example #30
import matplotlib.pyplot as plt

from boutdata import collect

f = collect("f", path="data")
yup = collect("yup", path="data")
ydown = collect("ydown", path="data")

plt.plot(f[0,4,4,:], label="f")
plt.plot(yup[4,4,:], label="f.yup")
plt.plot(ydown[4,4,:], label="f.ydown")

plt.legend()

plt.savefig("plot_interp.pdf")
plt.show()
Example #31
from __future__ import print_function
from numpy import *
from boutdata import collect

path='./data/'
data=collect('P',path=path)

print('Saving P..')
fa=fft.fft(data,axis=3)
save('fp',rollaxis(fa,0,4))

Example #32
# Turn the range of R and Z into index

R1d = R[:, 0]
Z1d = Z[0, :]

xind = [np.argmin(abs(R1d - r)) for r in Rrange]
zind = [np.argmin(abs(Z1d - z)) for z in Zrange]

R = R[xind[0]:(xind[1] + 1), zind[0]:(zind[1] + 1)]
Z = Z[xind[0]:(xind[1] + 1), zind[0]:(zind[1] + 1)]
if psi is not None:
    psi = psi[xind[0]:xind[1], zind[0]:zind[1]]

# Read the data

data = collect(variable, path=datapath, yind=yindex, xind=xind,
               zind=zind)[:, :, 0, :]
time = collect("t_array", path=datapath)

nt = len(time)  # Number of time points

# Get the levels to be used for contourf
data_min = np.amin(data)
data_max = np.amax(data)


def ceil_limit(value):
    """
    Rounds up values to 1 sig fig

    0.0032 -> 0.004
    12 -> 20.0
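    """
    # Completion sketch (assumption; the original body is truncated here):
    # round the magnitude up to one significant figure
    exponent = np.floor(np.log10(abs(value)))
    return np.ceil(value / 10 ** exponent) * 10 ** exponent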
Example #33
def showdata(var, surf=0, polar=0, tslice=0, movie=0):
    plt.ioff()

    # Determine shape of array
    dims = array(var.shape)
    ndims = dims.shape[0]

    # Perform check to make sure that data is either 2D or 3D
    if (ndims < 2):
        print('Error, data must be either 2 or 3 dimensional.  Exiting')
        sys.exit()

    if (ndims > 3):
        print('Error, data must be either 2 or 3 dimensional.  Exiting')
        sys.exit()

    if ((ndims == 2) & (polar != 0)):
        print('Error, data must be 3 dimensional (time, r, theta) for polar plots.  Exiting')
        sys.exit()

    # Collect time data from file
    if (tslice == 0):  # Only wish to collect time data if it matches
        t = collect('t_array')

    # Obtain size of data arrays
    Nt = var.shape[0]
    Nx = var.shape[1]
    if (ndims == 3):  # Only need Ny if we have third dimension
        Ny = var.shape[2]

    # Generate grid for plotting
    x = linspace(0, Nx, Nx)
    if (ndims == 3):
        y = linspace(0, Ny, Ny)

    # Determine range of data.  Used to ensure constant colour map and
    # to set y scale of line plot.
    fmax = max(var)
    fmin = min(var)

    # Set contour colour levels
    clevels = linspace(fmin, fmax, 25)

    # Create figures for animation plotting

    if (polar == 0):
        if (ndims == 2):
            fig = plt.figure()
            ax = plt.axes(xlim=(0, Nx), ylim=(fmin, fmax))
            line, = ax.plot([], [], lw=2)
            plt.xlabel(r'x')
            plt.ylabel(r'f')

        if (ndims == 3):
            if (surf == 0):
                fig = plt.figure()
                ax = plt.axes(xlim=(0, Nx), ylim=(0, Ny))
                plt.contourf(x, y, var[0, :, :].T, 25, lw=0, levels=clevels)
                plt.colorbar()
                plt.xlabel(r'x')
                plt.ylabel(r'y')
            else:
                fig = plt.figure()
                x, y = meshgrid(x, y)
                ax = fig.add_subplot(111, projection='3d')
                xstride = int(floor(Nx / 20))
                ystride = int(floor(Ny / 20))

    if (polar != 0):
        r = linspace(0, Nx - 1, Nx)
        theta = linspace(0, 2 * pi, Ny)
        r, theta = meshgrid(r, theta)
        fig, ax = plt.subplots(subplot_kw=dict(projection='polar'))
        f = var[0, :, :].T
        graph = ax.contourf(theta, r, f, levels=clevels)
        plt.colorbar(graph)
        ax.set_rmax(Nx - 1)

    # animation functions
    def lineplot1(i):
        f = var[i, :]
        line.set_data(x, f)
        plt.title(r't = %1.2e' % t[i])
        return line,

    def lineplot2(i):
        f = var[i, :]
        line.set_data(x, f)
        plt.title(r't = %i' % i)
        return line,

    def contour1(i):
        f = var[i, :, :].T
        graph = plt.contourf(x, y, f, lw=0, levels=clevels)
        plt.title(r't = %1.2e' % t[i])
        return graph

    def contour2(i):
        f = var[i, :, :].T
        graph = plt.contourf(x, y, f, lw=0, levels=clevels)
        plt.title(r't = %i' % i)
        return graph

    def surface1(i):
        f = var[i, :, :].T
        ax = fig.add_subplot(111, projection='3d')
        graph = ax.plot_wireframe(x, y, f, rstride=ystride, cstride=xstride)
        ax.set_zlim(fmin, fmax)
        plt.title(r't = %1.2e' % t[i])
        return graph

    def surface2(i):
        f = var[i, :, :].T
        ax = fig.add_subplot(111, projection='3d')
        graph = ax.plot_wireframe(x, y, f, rstride=ystride, cstride=xstride)
        ax.set_zlim(fmin, fmax)
        plt.title(r't = %i' % i)
        return graph

    def polar1(i):
        f = var[i, :, :].T
        graph = ax.contourf(theta, r, f, levels=clevels)
        ax.set_rmax(Nx - 1)
        plt.title(r't = %1.2e' % t[i])
        return graph

    def polar2(i):
        f = var[i, :, :]
        graph = ax.contourf(theta, r, f, levels=clevels)
        ax.set_rmax(Nx - 1)
        plt.title(r't = %i' % i)
        return graph

    # Call animation functions
    if (polar == 0):
        if (ndims == 2):
            if (tslice == 0):
                anim = animation.FuncAnimation(fig, lineplot1, frames=Nt)
            else:
                anim = animation.FuncAnimation(fig, lineplot2, frames=Nt)

        elif (ndims == 3):

            if (surf == 0):
                if (tslice == 0):
                    anim = animation.FuncAnimation(fig, contour1, frames=Nt)
                else:
                    anim = animation.FuncAnimation(fig, contour2, frames=Nt)
            else:
                if (tslice == 0):
                    anim = animation.FuncAnimation(fig, surface1, frames=Nt)
                else:
                    anim = animation.FuncAnimation(fig, surface2, frames=Nt)

    if (polar != 0):
        if (tslice == 0):
            anim = animation.FuncAnimation(fig, polar1, frames=Nt)
        else:
            anim = animation.FuncAnimation(fig, polar2, frames=Nt)

    # Save movie with given name
    if isinstance(movie, str):
        anim.save(movie + '.mp4')

    # Save movie with default name
    if not isinstance(movie, str):
        if (movie != 0):
            anim.save('animation.mp4')

    # Show animation
    if (movie == 0):
        plt.show()
Example #34
def showdata(vars, titles=[], legendlabels = [], surf = [], polar = [], tslice = 0, movie = 0, intv = 1, Ncolors = 25, x = [], y = []):
    """
    A Function to animate time dependent data from BOUT++
    Requires numpy, mpl_toolkits, matplotlib, boutdata libraries.
    
    To animate multiple variables on different axes:
    showdata([var1, var2, var3])
    
    To animate more than one line on a single axes:
    showdata([[var1, var2, var3]])
    
    The default graph types are:
    2D (time + 1 spatial dimension) arrays = animated line plot
    3D (time + 2 spatial dimensions) arrays = animated contour plot.
    
    To use surface or polar plots:
    showdata(var, surf = 1)
    showdata(var, polar = 1)
    
    Can plot different graph types on different axes.  Default graph types will be used depending on the dimensions of the input arrays.  To specify polar/surface plots on different axes:
    showdata([var1,var2], surf = [1,0], polar = [0,1])
    
    Movies require FFmpeg to be installed.

    The tslice variable is used to control the time value that is printed on each
    frame of the animation.  If the input data matches the time values found within
    BOUT++'s dmp data files, then these time values will be used.  Otherwise, an
    integer counter is used.
    """
    plt.ioff()
    
    # Check to see whether vars is a list or not.
    if isinstance(vars, list):
        Nvar = len(vars)
    else:
        vars = [vars]
        Nvar = len(vars)

    if Nvar < 1:
        raise ValueError("No data supplied")

    # Check to see whether each variable is a list - used for line plots only
    Nlines = []
    for i in range(0, Nvar):
        if isinstance(vars[i], list):
            Nlines.append(len(vars[i]))
        else:
            Nlines.append(1)
            vars[i] = [vars[i]] 

    # Sort out titles
    if len(titles) == 0:
        for i in range(0,Nvar):
            titles.append(('Var' + str(i+1))) 
    elif len(titles) != Nvar:
        raise ValueError('The length of the titles input list must match the length of the vars list.')

    # Sort out legend labels
    if len(legendlabels) == 0:
        for i in range(0,Nvar):
            legendlabels.append([])
            for j in range(0,Nlines[i]):
                legendlabels[i].append(chr(97+j))
    elif (isinstance(legendlabels[0], list) != 1):
        if Nvar != 1:
            check = 0
            for i in range(0,Nvar):
                if len(legendlabels) != Nlines[i]:
                    check = check+1
            if check == 0:
                print "Warning, the legendlabels list does not contain a sublist for each variable, but it's length matches the number of lines on each plot. Will apply labels to each plot"
                legendlabelsdummy = []
                for i in range(0, Nvar):
                    legendlabelsdummy.append([])
                    for j in range(0,Nlines[i]):
                        legendlabelsdummy[i].append(legendlabels[j])
                legendlabels = legendlabelsdummy
            else:
                "Warning, the legendlabels list does not contain a sublist for each variable, and it's length does not match the number of lines on each plot. Will default apply labels to each plot"
                legendlabels = []
                for i in range(0,Nvar):
                    legendlabels.append([])
                    for j in range(0,Nlines[i]):
                        legendlabels[i].append(chr(97+j))
        else:
            if (Nlines[0] == len(legendlabels)):
                legendlabels = [legendlabels]
    elif len(legendlabels) != Nvar:
        print "Warning, the length of the legendlabels list does not match the length of the vars list, will continue with default values"
        legendlabels = []
        for i in range(0,Nvar):
            legendlabels.append([])
            for j in range(0,Nlines[i]):
                legendlabels[i].append(chr(97+j))
    else:
        for i in range(0,Nvar):
            if isinstance(legendlabels[i], list):
                if len(legendlabels[i]) != Nlines[i]:
                    print('Warning, the length of the legendlabel (sub)list for each plot does not match the number of datasets for each plot. Will continue with default values')
                legendlabels[i] = []
                for j in range(0,Nlines[i]):
                    legendlabels[i].append(chr(97+j))
            else:
                legendlabels[i] = [legendlabels[i]]
            if len(legendlabels[i]) != Nlines[i]:
                print('Warning, the length of the legendlabel (sub)list for each plot does not match the number of datasets for each plot.  Will continue with default values')
                legendlabels[i] = []
                for j in range(0,Nlines[i]):
                    legendlabels[i].append(chr(97+j))


    # Sort out surf list
    if isinstance(surf, list):
        if (len(surf) == Nvar):
            for i in range(0, Nvar):
                if surf[i] >= 1:
                    surf[i] = 1
                else:
                    surf[i] = 0
        elif (len(surf) == 1):
            if surf[0] >= 1:
                surf[0] = 1
            else:
                surf[0] = 0
            if (Nvar > 1):
                for i in range(1,Nvar):
                    surf.append(surf[0])
        elif (len(surf) == 0):
            for i in range(0,Nvar):
                surf.append(0)
        else:
            print('Warning, length of surf list does not match number of variables.  Will default to no surface plots')
            for i in range(0,Nvar):
                surf.append(0)
            
    else:
        surf = [surf]
        if surf[0] >= 1:
            surf[0] = 1
        else:
            surf[0] = 0
        if (Nvar > 1):
            for i in range(1,Nvar):
                surf.append(surf[0])
                
    # Sort out polar list
    if isinstance(polar, list):
        if (len(polar) == Nvar):
            for i in range(0, Nvar):
                if polar[i] >= 1:
                    polar[i] = 1
                else:
                    polar[i] = 0
        elif (len(polar) == 1):
            if polar[0] >= 1:
                polar[0] = 1
            else:
                polar[0] = 0
            if (Nvar > 1):
                for i in range(1,Nvar):
                    polar.append(polar[0])
        elif (len(polar) == 0):
            for i in range(0,Nvar):
                polar.append(0)
        else:
            print('Warning, length of polar list does not match number of variables.  Will default to no polar plots')
            for i in range(0,Nvar):
                polar.append(0)
    else:
        polar = [polar]
        if polar[0] >= 1:
            polar[0] = 1
        else:
            polar[0] = 0
        if (Nvar > 1):
            for i in range(1,Nvar):
                polar.append(polar[0])

    # Determine shapes of arrays
    dims = []
    Ndims = []
    lineplot = []
    contour = []
    for i in range(0,Nvar):
        dims.append([])
        Ndims.append([])
        for j in range(0, Nlines[i]):   
            dims[i].append(array((vars[i][j].shape)))
            Ndims[i].append(dims[i][j].shape[0])
            # Perform check to make sure that data is either 2D or 3D
            if (Ndims[i][j] < 2):
                raise ValueError('data must be either 2 or 3 dimensional.  Exiting')

            if (Ndims[i][j] > 3):
                raise ValueError('data must be either 2 or 3 dimensional.  Exiting')

            if ((Ndims[i][j] == 2) & (polar[i] != 0)):
                print('Warning, data must be 3 dimensional (time, r, theta) for polar plots.  Will plot lineplot instead')

            if ((Ndims[i][j] == 2) & (surf[i] != 0)):
                print('Warning, data must be 3 dimensional (time, x, y) for surface plots.  Will plot lineplot instead')

            if ((Ndims[i][j] == 3) & (Nlines[i] != 1)):
                raise ValueError('cannot have multiple sets of 3D (time + 2 spatial dimensions) on each subplot')
                

            if ((Ndims[i][j] != Ndims[i][0])):
                raise ValueError('Error, Number of dimensions must be the same for all variables on each plot.')
                
        if (Ndims[i][0] == 2): # Set polar and surf list entries to 0
            polar[i] = 0
            surf[i] = 0
            lineplot.append(1)
            contour.append(0)
        else:
            if ((polar[i] == 1) & (surf[i] == 1)):
                print('Warning - cannot do polar and surface plots at the same time.  Default to contour plot')
                contour.append(1)
                lineplot.append(0)
                polar[i] = 0
                surf[i] = 0
            elif (polar[i] == 1) | (surf[i] == 1):
                contour.append(0)
                lineplot.append(0)
            else:
                contour.append(1)
                lineplot.append(0)

    # Obtain size of data arrays
    Nt = []
    Nx = []
    Ny = []
    for i in range(0, Nvar):
        Nt.append([])
        Nx.append([])
        Ny.append([])
        for j in range(0, Nlines[i]): 
            Nt[i].append(vars[i][j].shape[0])
            Nx[i].append(vars[i][j].shape[1])
            if (Nt[i][j] != Nt[0][0]):
                raise ValueError('time dimensions must be the same for all variables.')
                
            #if (Nx[i][j] != Nx[i][0]):
            #    raise ValueError('Dimensions must be the same for all variables on each plot.')  
                
            if (Ndims[i][j] == 3):
                Ny[i].append(vars[i][j].shape[2])
                #if (Ny[i][j] != Ny[i][0]):
                #    raise ValueError('Dimensions must be the same for all variables.')
                 
    # Collect time data from file
    if (tslice == 0):           # Only wish to collect time data if it matches 
        t = collect('t_array')
        if t is None:
            t = linspace(0,Nt[0][0], Nt[0][0])
    
    # Obtain number of frames
    Nframes = Nt[0][0] // intv

    # Generate grids for plotting
    x = []
    y = []
    for i in range(0,Nvar):
        x.append([])
        for j in range(0, Nlines[i]):
            x[i].append(linspace(0,Nx[i][j]-1, Nx[i][j]))
            
        #x.append(linspace(0,Nx[i][0]-1, Nx[i][0]))
        if (Ndims[i][0] == 3):
            y.append(linspace(0, Ny[i][0]-1, Ny[i][0]))
        else:
            y.append(0)
    
    # Determine range of data.  Used to ensure constant colour map and
    # to set y scale of line plot.
    fmax = []
    fmin = []
    xmax = []
    dummymax = []
    dummymin = []
    clevels = []
    for i in range(0,Nvar):
        
        dummymax.append([])
        dummymin.append([])
        for j in range(0,Nlines[i]):
            dummymax[i].append(max(vars[i][j]))
            dummymin[i].append(min(vars[i][j]))
        fmax.append(max(dummymax[i]))
        fmin.append(min(dummymin[i]))
        for j in range(0,Nlines[i]):
            dummymax[i][j] = max(x[i][j])
        xmax.append(max(dummymax[i]))
        
        clevels.append(linspace(fmin[i], fmax[i], Ncolors))
        
    # Create figures for animation plotting
    if (Nvar < 2):
        row = 1
        col = 1
        h = 6.0
        w = 8.0
    elif (Nvar <3):
        row = 1
        col = 2
        h = 6.0
        w = 12.0
    elif (Nvar < 5):
        row = 2
        col = 2
        h = 8.0
        w = 12.0
        
    elif (Nvar < 7):
        row = 2
        col = 3
        h = 8.0
        w = 14.0
        
    elif (Nvar < 10) :
        row = 3
        col = 3
        h = 12.0
        w = 14.0
    else:
        raise ValueError('too many variables...')
        

    fig = plt.figure(figsize=(w,h))
    title = fig.suptitle(r' ', fontsize=14  )

    # Initiate all list variables required for plotting here
    ax = []
    lines = []
    plots = []
    cbars = []
    xstride = []
    ystride = []
    r = []
    theta = []

    # Initiate figure frame
    for i in range(0,Nvar):
        lines.append([])
        if (lineplot[i] == 1):
            ax.append(fig.add_subplot(row,col,i+1))
            ax[i].set_xlim((0,xmax[i]))
            ax[i].set_ylim((fmin[i], fmax[i]))
            for j in range(0,Nlines[i]):
                lines[i].append(ax[i].plot([],[],lw=2, label = legendlabels[i][j])[0])
                #Need the [0] to 'unpack' the line object from tuple.  Alternatively:
                #lines[i], = lines[i]
            ax[i].set_xlabel(r'x')
            ax[i].set_ylabel(titles[i])
            if (Nlines[i] != 1):
                legendneeded = 1 
                for k in range(0,i):
                    if (Nlines[i] == Nlines[k]):
                        legendneeded = 0
                if (legendneeded == 1):
                    plt.axes(ax[i])
                    plt.legend(loc = 0)
            # Pad out unused list variables with zeros
            plots.append(0)
            cbars.append(0)
            xstride.append(0)
            ystride.append(0)
            r.append(0)
            theta.append(0)
            
        elif (contour[i] == 1):
            ax.append(fig.add_subplot(row,col,i+1))
            ax[i].set_xlim((0,Nx[i][0]-1))
            ax[i].set_ylim((0,Ny[i][0]-1))
            ax[i].set_xlabel(r'x')
            ax[i].set_ylabel(r'y')
            ax[i].set_title(titles[i])
            plots.append(ax[i].contourf(x[i][0],y[i],vars[i][0][0,:,:].T, Ncolors, lw=0, levels=clevels[i] ))
            plt.axes(ax[i])
            cbars.append(fig.colorbar(plots[i], format='%1.1e'))
            # Pad out unused list variables with zeros
            lines[i].append(0)
            xstride.append(0)
            ystride.append(0)
            r.append(0)
            theta.append(0)
            
        elif (surf[i] == 1):
            x[i][0],y[i] = meshgrid(x[i][0],y[i])
            if (Nx[i][0]<= 20):
                xstride.append(1)
            else:
                xstride.append(int(floor(Nx[i][0]/20)))
            if (Ny[i][0]<=20):
                ystride.append(1)
            else:
                ystride.append(int(floor(Ny[i][0]/20)))
            ax.append(fig.add_subplot(row,col,i+1, projection='3d'))
            plots.append(ax[i].plot_wireframe(x[i][0], y[i], vars[i][0][0,:,:].T, rstride=ystride[i], cstride=xstride[i]))
            title = fig.suptitle(r'', fontsize=14 )
            ax[i].set_xlabel(r'x')
            ax[i].set_ylabel(r'y')
            ax[i].set_zlabel(titles[i])
            # Pad out unused list variables with zeros
            lines[i].append(0)
            cbars.append(0)
            r.append(0)
            theta.append(0)
            
        elif (polar[i] == 1):
            r.append(linspace(1,Nx[i][0], Nx[i][0]))
            theta.append(linspace(0,2*pi, Ny[i][0]))
            r[i],theta[i] = meshgrid(r[i], theta[i])
            ax.append(fig.add_subplot(row,col,i+1, projection='polar'))
            plots.append(ax[i].contourf(theta[i], r[i], vars[i][0][0,:,:].T, levels=clevels[i]))
            plt.axes(ax[i])
            cbars.append(fig.colorbar(plots[i], format='%1.1e'))
            ax[i].set_rmax(Nx[i][0]-1)
            ax[i].set_title(titles[i])
            # Pad out unused list variables with zeros
            lines[i].append(0)
            xstride.append(0)
            ystride.append(0)

    # Animation function
    def animate(i):
        index = i*intv
        for j in range(0,Nvar):
            if (lineplot[j] == 1):
                for k in range(0,Nlines[j]):
                    lines[j][k].set_data(x[j][k], vars[j][k][index,:])
            elif (contour[j] == 1):
                plots[j] = ax[j].contourf(x[j][0],y[j],vars[j][0][index,:,:].T, Ncolors, lw=0, levels=clevels[j])
            elif (surf[j] == 1):
                ax[j] = fig.add_subplot(row,col,j+1, projection='3d')
                plots[j] = ax[j].plot_wireframe(x[j][0], y[j], vars[j][0][index,:,:].T, rstride=ystride[j], cstride=xstride[j])
                ax[j].set_zlim(fmin[j],fmax[j])
                ax[j].set_xlabel(r'x')
                ax[j].set_ylabel(r'y')
                ax[j].set_title(titles[j])
            elif (polar[j] == 1):
                plots[j] = ax[j].contourf(theta[j], r[j], vars[j][0][index,:,:].T, levels=clevels[j])
                ax[j].set_rmax(Nx[j][0]-1)

        if (tslice == 0):
            title.set_text('t = %1.2e' % t[index])
        else:
            title.set_text('t = %i' % index)
        return plots

   
    # Call Animation function
    anim = animation.FuncAnimation(fig, animate, frames=Nframes)
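    # Note: anim.save() below writes an .mp4 file, which requires a movie
    # writer such as ffmpeg to be available to matplotlib.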

    # Save movie with the given name, or a default name if none was given
    if isinstance(movie, basestring):
        anim.save(movie+'.mp4')
    elif movie != 0:
        anim.save('animation.mp4')

    # Show animation
    if (movie == 0):
        plt.show()
Example #35
0
    def dens_and_flux(self, t1, t2):
        old_stdout = sys.stdout                 # keep a handle so stdout can be restored
        sys.stdout = mystdout = StringIO()      # silence output from collect()
        path = self.path
        dy = self.dy
        dx = self.dx
        # ny = self.ny
        # nx = self.nx
        
        if self.fast:
            data = np.squeeze(collect('n', tind=[t1, t1 + 1], path=path, info=False))
            ntt, nxx, nyy = data.shape
            vx = np.squeeze(np.gradient(np.squeeze(collect('phi', tind=[t1, t1 + 1], path=path, info=False)))[2]) / dy
            vy = np.squeeze(np.gradient(np.squeeze(collect('phi', tind=[t1, t1 + 1], path=path, info=False)))[1]) / dx
        else:
            try:
                data = np.squeeze(collect('n', tind=[t1, t2], path=path, info=False))
                vx = np.squeeze(np.gradient(np.squeeze(collect('phi', tind=[t1, t2], path=path, info=False)))[2]) / dy
                vy = np.squeeze(np.gradient(np.squeeze(collect('phi', tind=[t1, t2], path=path, info=False)))[1]) / dx
            except:
                data = np.squeeze(collect('n', tind=[t1, t2 - 1], path=path, info=False))
                vx = np.squeeze(np.gradient(np.squeeze(collect('phi', tind=[t1, t2 - 1], path=path, info=False)))[2]) / dy
                vy = np.squeeze(np.gradient(np.squeeze(collect('phi', tind=[t1, t2 - 1], path=path, info=False)))[1]) / dx

        # Physical density: exponentiate if the simulation evolves log(n)
        if self.log_n:
            n = np.exp(data)
        else:
            n = data
        flux = n * vx
        sys.stdout = old_stdout
        n_ave = n.mean(axis=0)
        n_ave = n_ave.mean(axis=1)


        # b_DC, b_std,b_AC,b_AC_norm = mean_and_std(flux)
        # blob_fft = np.fft.rfft(b_AC_norm)
        # blob_power  = blob_fft.conj()*blob_fft
        # blob_R = np.real(np.fft.ifft(blob_power,axis=2)) #n 
        # temp = np.mean(blob_R[:,:,0:np.int(ny/8)],axis=0)
        # norm = np.amax(temp,axis=1)
        # norm = (np.repeat(norm,np.int(ny/3))).reshape(nx,np.int(ny/3))
        # temp = temp/norm
        # norm = (np.repeat(norm,np.int(ny/8))).reshape(nx,np.int(ny/8))
        # temp = temp/norm

        # z = np.array([((np.where(col > np.exp(-.5)*np.max(col)))[0]) for col in temp[:,:]])
        # for i,elem in enumerate(z):
        #     if len(elem) >0:
        #         z[i] = 2.0*(1./np.sqrt(2))*dy*(len(elem) + (-np.exp(-.5)+temp[i,max(elem)])/(temp[i,max(elem)]- temp[i,max(elem)+1]))


        

        vx = np.mean(np.mean(vx,axis=0),axis=1)
        vy = np.mean(np.mean(vy,axis=0),axis=1)
        
        dshape = data.shape
        # Both branches of the original if/else were identical; use the
        # physical density n for the fluctuation statistics
        temp = np.transpose(n, [1, 0, 2])
        temp = np.reshape(temp, [dshape[1], np.prod(dshape) // dshape[1]])
        n_sigma = temp.std(axis=1)

        temp = np.transpose(flux, [1, 0, 2])
        temp = np.reshape(temp, [dshape[1], np.prod(dshape) // dshape[1]])

        flux_sigma = temp.std(axis=1)
        flux_max = temp.max(axis=1)
        flux_ave = flux.mean(axis=0)
        flux_ave = flux_ave.mean(axis=1)   # nx long, averaged over y and t

        flux_hist = flux - flux_ave[None, :, None]   # fluctuation about the mean profile
        n_hist = n - n_ave[None, :, None]

        return ((n, n_sigma), (flux, flux_sigma, flux_max),(vx,vy), data)
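
# Hedged sketch (not part of the class above): the velocities used there are
# estimated as v_x ~ d(phi)/dy and v_y ~ d(phi)/dx with numpy.gradient.  A
# minimal standalone version, with made-up array sizes and spacings (every
# name below is illustrative only):
import numpy as np
phi_demo = np.zeros((10, 32, 64))              # hypothetical potential, shape (t, x, y)
dx_demo, dy_demo = 0.1, 0.1                    # hypothetical grid spacings
vx_demo = np.gradient(phi_demo)[2] / dy_demo   # centred differences along the y axis
vy_demo = np.gradient(phi_demo)[1] / dx_demo   # centred differences along the x axis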
Example #36
0
#from boututils import DataFile

from traits.api import HasTraits, Range, Instance, on_trait_change

from traitsui.api import View, Item, Group

from mayavi.core.api import PipelineBase
from mayavi.core.ui.api import MayaviScene, SceneEditor, MlabSceneModel


#Read in the data file, extracting the variable required
#f = DataFile("data/BOUT.dmp.0.nc")
#T = f.read("T")
#f.close()

from boutdata import collect

T = collect("T", path="data")

#t = 0
#Returns the dataset from the variable to be plotted, here for a given time
def T_time(t):
    return T[t,:,:,0]

class Visualisation(HasTraits):
    time = Range(0,(len(T[:,0,0,0])-1))
    scene = Instance(MlabSceneModel, ())
    plot = Instance(PipelineBase)

#    def __init__(self):
#        HasTraits.__init__(self)
#        t = T_time(self.time)
#        self.plot = self.scene.mlab.imshow(T_time(self.time))
#
# Plot bubble position over time

from boutdata import collect

from numpy import zeros
import matplotlib.pyplot as plt

path = "bubble/"

dx = collect("dx", path=path)
dz = collect("dz", path=path)

vof = collect("vof", path=path)
nt, nx, _, nz = vof.shape

mid_x = (nx - 4.) / 2. + 1.5
mid_z = nz / 2.0 - 0.5  # Index at the middle

imx = int(mid_x)
imz = int(mid_z)

result = zeros(nt)

for tind in range(nt):
    # Find where vof transitions from 1 to 0
    data = vof[tind, :, 0, imz]

    indl = imx
    indh = nx - 1
    while indl != indh:
Example #38
0
#####################################
# Example of analysis of data
#####################################

import numpy as np
import pylab as plt
from boutdata import collect

path = './data/'
var = collect('P', path=path)

dcvar = np.mean(var, axis=3)
rmsvar = np.sqrt(np.mean(var**2, axis=3) - dcvar**2)
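# Note: sqrt(<var^2>_z - <var>_z^2) is just the population standard deviation
# along the z axis, so rmsvar should agree with var.std(axis=3).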

plt.figure()
plt.plot(rmsvar[:, 34, 32])
plt.show(block=False)
fvar = np.fft.rfft(var, axis=3)

plt.figure()
plt.plot(abs(fvar[:, 34, 32, 1:10]))
plt.show(block=False)

plt.figure()
plt.semilogy(abs(fvar[:, 34, 32, 1:7]))
plt.show(block=False)

plt.show()
Example #39
0
# print inp2

#read the data and process
t1= 0
t2 = 200
try:
     inp = read_inp(path=path,boutinp='BOUT.inp')
     inp = parse_inp(inp)
     
     dx = np.double(inp['[mesh]']['dx'])
     #print 'dx',dx
     zmax = np.double(inp['[main]']['ZMAX'])

     print dx,zmax
     n = (np.squeeze(collect("n",path=path,tind =[t1,t2])))
     print dx,zmax
     n0 = (np.squeeze(collect("n0",path=path,tind =[t1,t2])))
     u = np.squeeze(collect("u",path=path,tind =[t1,t2]))
     phi = np.squeeze(collect("phi",path=path,tind =[t1,t2]))
     time = np.squeeze(collect("t_array",path=path,xind=[0,0]))
     #dx = np.squeeze(collect("dx",path=path))
     #zmax = np.squeeze(collect("ZMAX",path=path))
    
     nt,nx,ny = n.shape
     dy = (2.0*np.pi*zmax)/(ny)
     dky = 1.0/zmax
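     # dy is presumably the binormal (z) grid spacing: the domain has length
     # 2*pi*ZMAX divided over ny points, and dky = 1/ZMAX is then the spacing
     # between the Fourier wavenumbers resolved in that direction.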

     print 'dky', dky
except Exception as e:
     print "read/collect failed:", e
Example #40
0
def show(path='/home/cryosphere/BOUT/examples/Q3/data_short', var='Ni'): 

    from . import read_grid,parse_inp, read_inp,basic_info
    import sys
    import pickle
    
    #print list(sys.modules.keys())

    try:
        sys.path.append('/home/cryosphere/BOUT/tools/pylib')
        sys.path.append('/home/cryosphere/BOUT/tools/pylib/boutdata')
        sys.path.append('/home/cryosphere/BOUT/tools/pylib/boututils')
        import matplotlib
        import gobject
        import numpy as np
        from ordereddict import OrderedDict
    except ImportError:
        print "can't find the modules I need, you fail"
        sys.exit() #no point in going on
        
    #import some bout specific modules
    try:
        import boutdata
        import boututils
    except:
        print "can't find bout related modules, you fail"
        sys.exit() #no point in going on, shoot yourself 
    
    #import modules for creating pdfs    
    try:
        import matplotlib.pyplot as plt
        from matplotlib.backends.backend_pdf import PdfPages
    except:
        print "can' find matplotlib"

    
    
    #lets collect the data
    
    boutpath = path
    
    print var 
    print path
    print sys.path
   
    ni = boutdata.collect(var,path=path)

    if len(ni.shape) ==4:
        nt,nx,ny,nz = ni.shape
    else:
        print "something with dimesions"

    data = ni[:,:,ny/2,:]
    pp = PdfPages('output.pdf')
    plt.figure()
    cs = plt.contour(data[nt/2,:,:])
    plt.clabel(cs, inline=1, fontsize=10)
    plt.title('Simplest default with labels')
    plt.savefig(pp, format='pdf')

    plt.figure()
    CS = plt.contour(data[0,:,:],6,
                    colors='k', # negative contours will be dashed by default
                    )
    plt.clabel(CS, fontsize=9, inline=1)
    plt.title('Single color - negative contours dashed')
    plt.savefig(pp, format='pdf')
   
    
    a = read_inp(path=path)
 
    b = parse_inp(a)
   
    #print b

   #now we also want to get some information from the grid file

    gridfile = b['[main]']['grid']
    #print b
    #print gridfile
    f = read_grid(gridfile)

    #we can try to bundle some key pieces of data into a single meta-data
    #container
    

    evolved = []
    #loop over the top level keys and look for any evolve subkeys, see what is evolved
    for section in b.keys():
        if 'evolve' in b[section]:
            if b[section]['evolve']=='true' or b[section]['evolve']=='True':
                evolved.append(section.strip('[]'))
                print evolved
            
    

    meta = OrderedDict()
    meta['dt'] = b['[main]']['TIMESTEP']
    meta['evolved'] = evolved
    meta['DZ'] = b['[main]']['ZMAX']#-b['[main]']['ZMIN']
    

    #now put everything you need into an ordered dictionary
    AA = b['[2fluid]']['AA']
    
    Ni0 = f.variables['Ni0'][:]*1.e14
    bmag = f.variables['bmag'][:]*1.e4 #to cgs
    Ni_x = f.variables['Ni_x'][:]*1.e14 # cm^-3
    rho_s = f.variables['Te_x'][:]/bmag # in cm

    # and so on just follow post_bout.pro, create a sep. function

    #find a nice way to print
    
   
    
    #loop over evoled fields ?
    #find out which are evolved to begin with
    
    output = OrderedDict()
    data = OrderedDict()

    all_modes = []
    for i,active in enumerate(meta['evolved']):
       data[active] = boutdata.collect(active,path=path)
       modes,ave = basic_info(data[active],meta)
       output[active] = {'modes':modes,'ave':ave}
       for x in output[active]['modes']:
          if x['k'][0] not in  all_modes:
              all_modes.append(x['k'][0])

    #rebuild with all the relevant modes
    output = OrderedDict()


    for i,active in enumerate(meta['evolved']):
        modes,ave = basic_info(data[active],meta,user_peak = all_modes)
        output[active] = {'modes':modes,'ave':ave}
    

    # make sure that any peaks that only appear in one field
    # can be compared across all fields
    
    
    
    
    # let's pickle the results
    pickled_out = open('post_bout.pkl','wb')
    pickle.dump(output,pickled_out)
    pickled_out.close()

    
   
    plt.figure()
    #ax = plt.add_subplot(2,1,1)
    #ax.set_yscale('log')

    plt.semilogy(modes[0]['amp'].max(1))
    plt.title('simple plot - amp of the 1st dominant mode')
    plt.savefig(pp, format='pdf')

    plt.figure()
    plt.plot(ave['amp'])
    plt.plot(modes[0]['amp'].max(1))
    #plt.semilogy(modes[0]['amp'].max(1))
    plt.title('A_dom and A_ave')
    plt.savefig(pp,format='pdf')

    plt.figure()
    plt.semilogy(ave['amp'])
    plt.semilogy(modes[0]['amp'].max(1))
    #plt.semilogy(modes[0]['amp'].max(1))
    plt.title('A_dom and A_ave')
    plt.savefig(pp,format='pdf')


    plt.figure()
    plt.plot(modes[0]['phase'][3:,nx/2])
    #plt.semilogy(modes[1]['amp'].max(1))
    #plt.semilogy(modes[2]['amp'].max(1))
    #plt.semilogy(modes[0]['amp'].max(1))
    plt.title('phase')
    plt.savefig(pp,format='pdf')

    plt.figure()
    plt.semilogy(modes[0]['amp'][nt/2,:])
    plt.title('A[r] of the 1st mode at nt/2')
    plt.savefig(pp,format='pdf')
    
    #ax = plt.add_subplot(2,1,1)
    #ax.set_yscale('log')

    

    plt.close() 
    pp.close()

    return output
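
# Hedged usage sketch (the path below is a placeholder, not taken from the
# original):
#   out = show(path='/path/to/data_short', var='Ni')
#   print out.keys()   # one entry per evolved field, each with 'modes' and 'ave'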
Example #41
0
def perform_MES_test(\
                     paths,\
                     extension     = 'png',\
                     show_plot     = False,\
                     xz_error_plot = False,\
                     xy_error_plot = False,\
                     use_dx        = True ,\
                     use_dy        = True ,\
                     use_dz        = True ,\
                     y_plane       = None ,\
                     z_plane       = None ,\
                     yguards       = False,\
                     ):
    """Collects the data members belonging to a convergence plot"""

    # Figure out the directions
    directions = {'dx': use_dx, 'dy': use_dy, 'dz': use_dz}

    # Make a variable to store the errors and the spacing
    data = {'error_2':[], 'error_inf':[], 'spacing':[],\
            'error_field':[]}

    # Loop over the runs in order to collect
    for path in paths:
        # Collect the error field
        error_field = collect('e', path=path, info=False,\
                              xguards = False, yguards = yguards)

        if len(error_field.shape) == 4:
            # Pick the last time point
            error_field = error_field[-1]

        # Add the error field to data
        Lx = collect('Lx', path=path, info=False)
        dx = collect('dx', path=path, info=False,\
                           xguards = False, yguards = yguards)
        dy = collect('dy', path=path, info=False,\
                           xguards = False, yguards = yguards)
        dz = collect('dz', path=path, info=False)
        data['error_field'].append({'field':abs(error_field),\
                                    'Lx':Lx                 ,\
                                    'dx':dx[0,0]            ,\
                                    'dy':dy[0,0]            ,\
                                    'dz':dz})

        # The error in the 2-norm and infinity-norm
        data['error_2'].append(np.sqrt(np.mean(error_field**2.0)))
        data['error_inf'].append(np.max(np.abs(error_field)))

        # We are interested in the max of the spacings, so we make
        # an appendable list
        max_spacings = []
        # Collect the spacings
        if use_dx:
            max_spacings.append(np.max(dx))
        if use_dy:
            max_spacings.append(np.max(dy))
        if use_dz:
            max_spacings.append(np.max(dz))

        # Store the spacing in the data
        data['spacing'].append(np.max(max_spacings))

    # Sort the data
    data = sort_data(data)

    # Find the order of convergence in the 2 norm and infinity norm
    order_2, order_inf = get_order(data)

    # Get the root name of the path (used for saving files)
    root_folder = os.path.join(paths[0].split('/')[0], "MES_results")
    # Create folder if it doesn't exist
    if not os.path.exists(root_folder):
        os.makedirs(root_folder)
        print(root_folder + " created\n")

    # Use a fixed base name for the plot files
    name = 'MES'

    # Print the convergence rate
    print_convergence_rate(data, order_2, order_inf, root_folder, name)

    # Plot
    # We want to show the lines of the last orders, so we send in
    # order_2[-1] and order_inf[-1]
    do_plot(data, order_2[-1], order_inf[-1],\
            root_folder, name, extension,\
            xz_error_plot, xy_error_plot, show_plot,\
            y_plane, z_plane, directions)
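
# Hedged usage sketch (the folder names below are placeholders): run over a set
# of grid resolutions and check the measured order of convergence.
#   paths = ('MES/nx16/data', 'MES/nx32/data', 'MES/nx64/data')
#   perform_MES_test(paths, extension='pdf', show_plot=True)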
Example #42
0
from boutdata import collect
import matplotlib.pyplot as plt
import numpy as np
import os

Nnorm = collect("Nnorm")
Tnorm = collect("Tnorm")

ni = collect("Nd+")[::10, 0, :, 0]  # Ion density
nn = collect("Nd")[::10, 0, :, 0]  # Neutral density
pe = collect("Pe")[::10, 0, :, 0]  # Electron pressure
pi = collect("Pd+")[::10, 0, :, 0]  # Ion pressure
pn = collect("Pd")[::10, 0, :, 0]  # Neutral pressure

Te = pe / np.clip(ni, 1e-5, None)  # Electron temperature
Ti = pi / np.clip(ni, 1e-5, None)  # Ion temperature
Tn = pn / np.clip(nn, 1e-5, None)  # Neutral temperature

n_max = max([np.amax(ni), np.amax(nn)]) * Nnorm
t_max = max([np.amax(Te), np.amax(Ti), np.amax(Tn)]) * Tnorm

for i in range(ni.shape[0]):
    fig, ax1 = plt.subplots()
    ax1.set_xlabel(r'Cell number')
    ax2 = ax1.twinx()

    ax1, ax2 = ax2, ax1  # Swap left-right

    ax1.set_ylabel('Density [m^-3]', color='b')
    ax1.tick_params('y', colors='b')
Example #43
0
# Check command-line arguments
if len(sys.argv) < 2:
    # Print usage information
    print("Usage: {0} path\n e.g. {0} data [zoom]".format(sys.argv[0]))
    sys.exit(1)

path = sys.argv[1]

if len(sys.argv) > 2:
    # Optional second argument is length from target
    zoomlength = float(sys.argv[2])
else:
    zoomlength = 1.0  # 1m default

tind = -1

# Evolving variables, remove extra guard cells so just one each side
P = collect("P", path=path, tind=tind, yguards=True)[-1, 0, 1:-1, 0]
Ne = collect("Ne", path=path, tind=tind, yguards=True)[-1, 0, 1:-1, 0]
NVi = collect("NVi", path=path, tind=tind, yguards=True)[-1, 0, 1:-1, 0]
Nn = collect("Nn", path=path, tind=tind, yguards=True)[-1, 0, 1:-1, 0]

# Normalisations
nnorm = collect("Nnorm", path=path, tind=tind)
tnorm = collect("Tnorm", path=path, tind=tind)
pnorm = nnorm * tnorm * 1.602e-19  # Converts p to Pascals
cs0 = collect("Cs0", path=path)

# electron temperature
Te = (0.5 * P / Ne) * tnorm
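# (The factor 0.5 presumably assumes Te = Ti, so the total pressure P = 2*Ne*Te
#  in normalised units; multiplying by tnorm converts the result to eV.)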

NVi *= nnorm * cs0
Ne *= nnorm
Example #44
0
elif len(argv)==2:
  try:
    end_index = int(argv[1])
    data_path = "data"
  except ValueError:
    end_index = -1
    data_path = str(argv[1])
elif len(argv)==3:
  end_index = int(argv[1])
  data_path = str(argv[2])
else:
  print "Arguments: '[end_index] [data_path]' or 'gamma [data_path]'"
  Exit(1)

# Collect the data
Te = collect("T_electron", path=data_path, xind=2, info=True, yguards=True)
Ti = collect("T_ion", path=data_path, xind=2, info=True, yguards=True)

if end_index<0:
  end_index = len(Te[:,0,0,0])

Te_left = []
Ti_left = []
for i in range(end_index):
	Te_left.append((Te[i,0,2,0]+Te[i,0,3,0])/2)
	Ti_left.append((Ti[i,0,2,0]+Ti[i,0,3,0])/2)

# Make plot
if len(argv)>2:
  pyplot.semilogx(Te_left[:end_index],'r',Ti_left[:end_index],'b')
  pyplot.title("Te (red) and Ti (blue) at the (left) boundary")
Example #45
0
import matplotlib

matplotlib.use("Agg")

import matplotlib.pyplot as plt
from boututils.datafile import DataFile
import numpy as np

with DataFile(gridfile) as grid:
    Rxy = grid["Rxy"]
    Zxy = grid["Zxy"]
    Bpxy = grid["Bpxy"]
    Bxy = grid["Bxy"]
    ixsep = grid["ixseps1"]

J = collect("J", path=path)
g_22 = collect("g_22", path=path)
dx = collect("dx", path=path)
dy = collect("dy", path=path)
dz = 2 * np.pi

dV = abs(J * dx * dy * dz)  # Volume element
dS = dV / (dy * np.sqrt(g_22))  # Area element
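# dS is presumably the cross-sectional area of each cell normal to the magnetic
# field, since dy*sqrt(g_22) is the parallel arc length of a cell.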

# Calculate distance along target

# Outer target
dR = Rxy[3:-1, -1] - Rxy[2:-2, -1]
dZ = Zxy[3:-1, -1] - Zxy[2:-2, -1]

dl = np.sqrt(dR**2 + dZ**2)
Example #46
0
#!/usr/bin/env python
#

path = "bubble"

from boutdata import collect
from numpy import arange, mean, sqrt, roll

import matplotlib.pyplot as plt
import matplotlib as mpl

mpl.rcParams.update({'font.size': 14})

vort = collect("vort", path=path)
dx = collect("dx", path=path)[0, 0]
dz = collect("dz", path=path)

time = collect("t_array", path=path) * 1e3

nt, nx, _, nz = vort.shape

mid_x = (nx - 4.) / 2. + 1.5

x = (arange(nx) - (nx / 2.) - 0.5) * dx * 1e3
z = (arange(nz) - (nz / 2.) + 0.5) * dz * 1e3

# Calculate RMS vorticity
vrms = sqrt(mean(mean(vort**2, axis=3), axis=1))

plt.plot(time, vrms)
plt.xlim([0, 100])
colors = ["tab:blue", "tab:green", "tab:orange", "tab:red"]

gridfile = "TCV_52061_t1.0_64x64_2e19.nc"
grid = DataFile(gridfile)
Rxy = grid["Rxy"]

ymid = np.argmax(Rxy[-1,:])
xsep = grid["ixseps1"]

rmid = Rxy[:,ymid]
rtarg = Rxy[:,-1]

fig, axs = plt.subplots(2, 3)

for path, color in zip(paths, colors):
    n = collect("ne", tind=-1, path=path)
    nnorm = collect("nnorm", path=path)
    n *= nnorm

    te = collect("te", tind=-1, path=path)
    tnorm = collect("tnorm", path=path)
    te *= tnorm

    axs[0,0].plot(rmid, n[0,:,ymid,0], color=color)
    axs[0,0].axvline(rmid[xsep], color="k", linestyle="--")
    
    axs[1,0].plot(rmid, te[0,:,ymid,0], color=color)
    axs[1,0].axvline(rmid[xsep], color="k", linestyle="--")

    axs[0,1].plot(rtarg, n[0,:,-1,0], color=color)
    axs[0,1].axvline(rtarg[xsep], color="k", linestyle="--") 
Example #48
0
path=sys.argv[1]
key=sys.argv[2]


cache='/scratch/01523/meyerson/'+key

import shutil

if os.path.exists(cache):
    shutil.rmtree(cache)    # remove any previous cache contents (os.rmdir fails on non-empty dirs)

os.makedirs(cache)

print path
meta = metadata(path=path)

from boutdata import collect
from boututils import savemovie
gamma = collect("Gammax",yind=[5,5],path=path)
ni = collect("Ni",yind=[5,5],path=path)
rho = collect("rho",yind=[5,5],path=path)

#one movie per cpu
print ni.shape, gamma.shape
savemovie(gamma[:,:,0,:],ni[:,:,0,:],moviename='movie_'+key+'.avi',
          meta=meta,cache=cache+"/",overcontour=True)

savemovie(rho[:,:,0,:],ni[:,:,0,:],moviename='movie_phi_ni'+key+'.avi',
          meta=meta,cache=cache+"/",overcontour=True)

#os.rmdir(cache)

  else:
    return -1.

massunit = 1.602176565e-19
electron_mass = 9.10938291e-31/massunit
electron_charge = -1.602176565e-19
ion_mass =  3.34358348e-27/massunit
ion_charge = 1.602176565e-19
epsilon_0 = 8.85418781762039e-12/pow(massunit,-1)
logLambda = 16.0

stagger = True
t_interval = 10

# Collect the data
Te = collect("T_electron", path="data", info=True, yguards=True)
Ti = collect("T_ion", path="data", info=True, yguards=True)
n = collect("n_ion", path="data", info=True, yguards=True)

ylength = len(Te[0,2,:,0])
tlength = len(Te[:,2,0,0])

try: dy = collect("dy", path="data", info=True, yguards=True)
except TypeError:
  print "Warning, could not find dy, setting to 1.0"
  dy=[[1.]*ylength]*5

try: g_22 = collect("g_22", path="data", info=True, yguards=True)
except TypeError:
  print "Warning, could not find g_22, setting to (80./256)^2"
  g_22=[[pow(80./256,2)]*ylength]*5
Example #50
0
from boutdata import collect

import numpy as np
from numpy import *

Bx=collect('Bx',path='/home/colt/Research/BOUT/examples/Firehose/data')

import matplotlib.pyplot as plt
from matplotlib import *

xlen=32
ylen=128
Bxdata=zeros([ylen,xlen])
Bjdata=zeros(ylen)
Bfield=zeros([2,ylen])
dely=0.05
delx=0.05

plt.figure()

for i in range(4):
  Bxdata=Bx[7*i,:,:,1]
  plt.subplot(2,2,i+1)
  for j in range(8):
    Bjdata=Bxdata[4*j,:]
    Bfield[0,0]=4*j*delx
    Bfield[1,0]=0
    for k in range(ylen-1):
      Bfield[0,k+1]=Bfield[0,k]+Bjdata[k]*delx/(Bjdata[k]**2+1)**2
      Bfield[1,k+1]=Bfield[1,k]+dely/(Bjdata[k]**2+1)**2
      
Example #51
0
from boutdata import collect
import matplotlib.pyplot as plt

n = collect("Ne")

fig, axs = plt.subplots(1,3, figsize=(9,3))

for ax, time in zip(axs, [15, 30, 45]):
    ax.contourf(n[time, :, 0, :].T, 50)

plt.tight_layout()
    
plt.savefig("blob2d-te-ti.png")

plt.show()
  else:
    return -1.

massunit = 1.602176565e-19
electron_mass = old_div(9.10938291e-31,massunit)
electron_charge = -1.602176565e-19
ion_mass =  old_div(3.34358348e-27,massunit)
ion_charge = 1.602176565e-19
epsilon_0 = old_div(8.85418781762039e-12,pow(massunit,-1))
logLambda = 16.0

stagger = True
t_interval = 10

# Collect the data
Te = collect("T_electron", path="data", info=True, yguards=True)
Ti = collect("T_ion", path="data", info=True, yguards=True)
n = collect("n_ion", path="data", info=True, yguards=True)

ylength = len(Te[0,2,:,0])
tlength = len(Te[:,2,0,0])

try: dy = collect("dy", path="data", info=True, yguards=True)
except TypeError:
  print("Warning, could not find dy, setting to 1.0")
  dy=[[1.]*ylength]*5

try: g_22 = collect("g_22", path="data", info=True, yguards=True)
except TypeError:
  print("Warning, could not find g_22, setting to (80./256)^2")
  g_22=[[pow(old_div(80.,256),2)]*ylength]*5
Example #53
0
            xind = None

        print 'Y slices (-1 for all), Range=', [0,ny]
        yind = [int(raw_input('min: ')), int(raw_input('max: '))]
        if np.min(yind) < 0 or np.max(yind) > ny:
            yind = None

        print 'Z slices (-1 for all), Range=', [0,nz]
        zind = [int(raw_input('min: ')), int(raw_input('max: '))]
        if np.min(zind) < 0 or np.max(zind) > nz:
            zind = None
    
        # Creates a global variable of the same name of the variable in the file
        # The collect routine accumulates all the data using the slices requested
        # Sometimes throws a memory error if too much data is requested.
        globals()[str(f.handle.variables.keys()[v])] = collect(f.handle.variables.keys()[v], path=path, tind=tind, xind=xind, yind=yind, zind=zind)
        
    else:
        # If there's less than 4 dimensions, it just grabs the whole lot
        globals()[str(f.handle.variables.keys()[v])] = collect(f.handle.variables.keys()[v], path=path)
        
    # At the users request, a file with the name of the variable and numpy extension can be made
    # in the directory with the data
    if query_yes_no('Save .npy backup?', default='no') == 'yes':
        print 'Saving variable'
        np.save(path + '/' + str(f.handle.variables.keys()[v]) + '.npy', globals()[str(f.handle.variables.keys()[v])])

    if query_yes_no('Import another variable?', default='yes') == 'yes':
        c = 0
    else:
        c = 1
Example #54
0
    ni = np.shape(rmsp_f)[1]
    nj = np.shape(rmsp_f)[2]

    growth = np.zeros((ni, nj))

    with np.errstate(divide='ignore'):

        for i in range(ni):
            for j in range(nj):
                growth[i, j] = np.gradient(np.log(rmsp_f[tind::, i, j]))[-1]

    d = np.ma.masked_array(growth, np.isnan(growth))
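    # growth[i, j] is the estimated growth rate d(ln rms)/dt (per output step)
    # at the final time point; NaN/Inf entries (e.g. from log of zero) are
    # masked before averaging below.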

    # masked arrays
    # http://stackoverflow.com/questions/5480694/numpy-calculate-averages-with-nans-removed

    print('Total average growth rate= ',
          np.mean(np.ma.masked_array(d, np.isinf(d))))
    if y is not None:
        print(
            'Growth rate in plane', y, '= ',
            np.mean(np.ma.masked_array(growth[:, y], np.isnan(growth[:, y]))))


#test
if __name__ == '__main__':
    path = '/Users/brey/BOUT/bout/examples/elm-pb/data'

    data = collect("P", path=path)

    avgrate(data, 32)
Example #55
0
    g = GridPlane()
    g.grid_plane.axis = 'x'
    mayavi.add_module(g)

if __name__ == '__main__':
    from boutdata import collect
    from boututils import file_import
    
    path = "/media/449db594-b2fe-4171-9e79-2d9b76ac69b6/runs/data_33/"
    #path="/home/ben/run4"

    #g = file_import("../cbm18_dens8.grid_nx68ny64.nc")
    g = file_import("data/cbm18_8_y064_x516_090309.nc")
    #g = file_import("/home/ben/run4/reduced_y064_x256.nc")
    
    data = collect("P", tind=50, path=path)
    data = data[0,:,:,:]
    s = np.shape(data)
    nz = s[2]
    
    bkgd = collect("P0", path=path)
    for z in range(nz):
        data[:,:,z] += bkgd

    # Create a structured grid
    sgrid = create_grid(g, data, 10)
    

    w = tvtk.XMLStructuredGridWriter(input=sgrid, file_name='sgrid.vts')
    w.write()
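    # The resulting sgrid.vts file is a VTK XML structured grid, which can be
    # inspected in e.g. ParaView.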
    
Example #56
0
def View3D(g, path=None, gb=None):
    ##############################################################################
    # Resolution

    n = 51

    #compute Bxy
    [Br, Bz, x, y, q] = View2D(g, option=1)

    rd = g.r.max() + .5
    zd = g.z.max() + .5
    ##############################################################################
    # The grid of points on which we want to evaluate the field
    X, Y, Z = np.mgrid[-rd:rd:n * 1j, -rd:rd:n * 1j, -zd:zd:n * 1j]
    ## Avoid rounding issues :
    #f = 1e4  # this gives the precision we are interested by :
    #X = np.round(X * f) / f
    #Y = np.round(Y * f) / f
    #Z = np.round(Z * f) / f

    r = np.c_[X.ravel(), Y.ravel(), Z.ravel()]

    ##############################################################################
    # Calculate field
    # First initialize a container matrix for the field vector :
    B = np.empty_like(r)

    # Compute the toroidal field.
    # fpol is given between simagx (psi on the axis) and sibdry (psi on the
    # limiter or separatrix), so the toroidal field (fpol/R) and the q profile
    # are only defined within these boundaries.  For each (r, z) we have psi,
    # so we take fpol(psi) if (r, z) is inside the boundary and
    # fpol = fpol(outer boundary) outside it.

    # The full range of psi is [g.psi.min(), g.psi.max()], but f(psi) is only
    # tabulated up to the boundary, so extend it with points padded up to the
    # maximum psi.
    # set points between psi_limit and psi_max

    add_psi = np.linspace(g.sibdry, g.psi.max(), 10)

    # define the x (psi) array
    xf = np.arange(np.float(g.qpsi.size)) * (
        g.sibdry - g.simagx) / np.float(g.qpsi.size - 1) + g.simagx

    # pad the extra values excluding the 1st value

    xf = np.concatenate((xf, add_psi[1::]), axis=0)

    # pad fpol with corresponding points

    fp = np.lib.pad(g.fpol, (0, 9), 'edge')
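    # 'edge' padding repeats the last fpol value 9 times, one for each of the
    # extra psi points appended above (add_psi has 10 points, but its first
    # point duplicates sibdry and is dropped).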

    # create interpolating function

    f = interpolate.interp1d(xf, fp)

    #calculate Toroidal field

    Btrz = old_div(f(g.psi), g.r)

    rmin = g.r[:, 0].min()
    rmax = g.r[:, 0].max()
    zmin = g.z[0, :].min()
    zmax = g.z[0, :].max()

    B1p, B2p, B3p, B1t, B2t, B3t = magnetic_field(g, X, Y, Z, rmin, rmax, zmin,
                                                  zmax, Br, Bz, Btrz)

    bpnorm = np.sqrt(B1p**2 + B2p**2 + B3p**2)
    btnorm = np.sqrt(B1t**2 + B2t**2 + B3t**2)

    BBx = B1p + B1t
    BBy = B2p + B2t
    BBz = B3p + B3t
    btotal = np.sqrt(BBx**2 + BBy**2 + BBz**2)

    Psi = psi_field(g, X, Y, Z, rmin, rmax, zmin, zmax)

    ##############################################################################
    # Visualization

    # We threshold the data ourselves, as the threshold filter produce a
    # data structure inefficient with IsoSurface
    #bmax = bnorm.max()
    #
    #B1[B > bmax] = 0
    #B2[B > bmax] = 0
    #B3[B > bmax] = 0
    #bnorm[bnorm > bmax] = bmax

    mlab.figure(1, size=(1080,
                         1080))  #, bgcolor=(1, 1, 1), fgcolor=(0.5, 0.5, 0.5))

    mlab.clf()

    fieldp = mlab.pipeline.vector_field(X,
                                        Y,
                                        Z,
                                        B1p,
                                        B2p,
                                        B3p,
                                        scalars=bpnorm,
                                        name='Bp field')

    fieldt = mlab.pipeline.vector_field(X,
                                        Y,
                                        Z,
                                        B1t,
                                        B2t,
                                        B3t,
                                        scalars=btnorm,
                                        name='Bt field')

    field = mlab.pipeline.vector_field(X,
                                       Y,
                                       Z,
                                       BBx,
                                       BBy,
                                       BBz,
                                       scalars=btotal,
                                       name='B field')

    field2 = mlab.pipeline.scalar_field(X, Y, Z, Psi, name='Psi field')

    #vectors = mlab.pipeline.vectors(field,
    #                      scale_factor=1,#(X[1, 0, 0] - X[0, 0, 0]),
    #                      )

    #vcp1 = mlab.pipeline.vector_cut_plane(fieldp,
    #                                        scale_factor=1,
    #                                        colormap='jet',
    #                                        plane_orientation='y_axes')
    ##
    #vcp2 = mlab.pipeline.vector_cut_plane(fieldt,
    #                                        scale_factor=1,
    #                                        colormap='jet',
    #                                        plane_orientation='x_axes')

    # Mask random points, to have a lighter visualization.
    #vectors.glyph.mask_input_points = True
    #vectors.glyph.mask_points.on_ratio = 6

    #vcp = mlab.pipeline.vector_cut_plane(field1)
    #vcp.glyph.glyph.scale_factor=5*(X[1, 0, 0] - X[0, 0, 0])
    # For prettier picture:
    #vcp1.implicit_plane.widget.enabled = False
    #vcp2.implicit_plane.widget.enabled = False

    iso = mlab.pipeline.iso_surface(field2,
                                    contours=[Psi.min() + .01],
                                    opacity=0.4,
                                    colormap='bone')

    for i in range(q.size):
        iso.contour.contours[i + 1:i + 2] = [q[i]]

    iso.compute_normals = True
    #

    #mlab.pipeline.image_plane_widget(field2,
    #                            plane_orientation='x_axes',
    #                            #slice_index=10,
    #                            extent=[-rd, rd, -rd, rd, -zd,zd]
    #                        )
    #mlab.pipeline.image_plane_widget(field2,
    #                            plane_orientation='y_axes',
    #                           # slice_index=10,
    #                            extent=[-rd, rd, -rd,rd, -zd,zd]
    #                        )

    #scp = mlab.pipeline.scalar_cut_plane(field2,
    #                                        colormap='jet',
    #                                        plane_orientation='x_axes')
    # For prettier picture and with 2D streamlines:
    #scp.implicit_plane.widget.enabled = False
    #scp.enable_contours = True
    #scp.contour.number_of_contours = 20

    #

    # Magnetic Axis

    s = mlab.pipeline.streamline(field)
    s.streamline_type = 'line'
    s.seed.widget = s.seed.widget_list[3]
    s.seed.widget.position = [g.rmagx, 0., g.zmagx]
    s.seed.widget.enabled = False

    #  q=i surfaces

    for i in range(np.shape(x)[0]):

        s = mlab.pipeline.streamline(field)
        s.streamline_type = 'line'
        ##s.seed.widget = s.seed.widget_list[0]
        ##s.seed.widget.center = 0.0, 0.0, 0.0
        ##s.seed.widget.radius = 1.725
        ##s.seed.widget.phi_resolution = 16
        ##s.seed.widget.handle_direction =[ 1.,  0.,  0.]
        ##s.seed.widget.enabled = False
        ##s.seed.widget.enabled = True
        ##s.seed.widget.enabled = False
        #
        if x[i].size > 1:
            s.seed.widget = s.seed.widget_list[3]
            s.seed.widget.position = [x[i][0], 0., y[i][0]]
            s.seed.widget.enabled = False

    # A trick to make transparency look better: cull the front face
    iso.actor.property.frontface_culling = True

    #mlab.view(39, 74, 0.59, [.008, .0007, -.005])
    out = mlab.outline(extent=[-rd, rd, -rd, rd, -zd, zd], line_width=.5)
    out.outline_mode = 'cornered'
    out.outline_filter.corner_factor = 0.0897222

    w = mlab.gcf()
    w.scene.camera.position = [
        13.296429046581462, 13.296429046581462, 12.979811259697154
    ]
    w.scene.camera.focal_point = [0.0, 0.0, -0.31661778688430786]
    w.scene.camera.view_angle = 30.0
    w.scene.camera.view_up = [0.0, 0.0, 1.0]
    w.scene.camera.clipping_range = [13.220595435695394, 35.020427055647517]
    w.scene.camera.compute_view_plane_normal()
    w.scene.render()
    w.scene.show_axes = True

    mlab.show()

    if path is not None:
        #BOUT data
        #path='../Aiba/'
        #
        #gb = file_import(path+'aiba.bout.grd.nc')
        #gb = file_import("../cbm18_8_y064_x516_090309.nc")
        #gb = file_import("cbm18_dens8.grid_nx68ny64.nc")
        #gb = file_import("/home/ben/run4/reduced_y064_x256.nc")

        data = collect('P', path=path)
        data = data[50, :, :, :]
        #data0=collect("P0", path=path)
        #data=data+data0[:,:,None]

        s = np.shape(data)
        nz = s[2]

        sgrid = create_grid(gb, data, 1)

        # OVERPLOT the GRID
        #mlab.pipeline.add_dataset(sgrid)
        #gr=mlab.pipeline.grid_plane(sgrid)
        #gr.grid_plane.axis='x'

        ## pressure scalar cut plane from bout
        scpb = mlab.pipeline.scalar_cut_plane(sgrid,
                                              colormap='jet',
                                              plane_orientation='x_axes')

        scpb.implicit_plane.widget.enabled = False
        scpb.enable_contours = True
        scpb.contour.filled_contours = True
        #
        scpb.contour.number_of_contours = 20
    #
    #
    #loc=sgrid.points
    #p=sgrid.point_data.scalars

    # compute pressure from scatter points interpolation
    #pint=interpolate.griddata(loc, p, (X, Y, Z), method='linear')
    #dpint=np.ma.masked_array(pint,np.isnan(pint)).filled(0.)
    #
    #p2 = mlab.pipeline.scalar_field(X, Y, Z, dpint, name='P field')
    #
    #scp2 = mlab.pipeline.scalar_cut_plane(p2,
    #                                        colormap='jet',
    #                                        plane_orientation='y_axes')
    #
    #scp2.implicit_plane.widget.enabled = False
    #scp2.enable_contours = True
    #scp2.contour.filled_contours=True
    #scp2.contour.number_of_contours = 20
    #scp2.contour.minimum_contour=.001

    # CHECK grid orientation
    #fieldr = mlab.pipeline.vector_field(X, Y, Z, -BBx, BBy, BBz,
    #                                  scalars=btotal, name='B field')
    #
    #sg=mlab.pipeline.streamline(fieldr)
    #sg.streamline_type = 'tube'
    #sg.seed.widget = sg.seed.widget_list[3]
    #sg.seed.widget.position=loc[0]
    #sg.seed.widget.enabled = False

    #OUTPUT grid

    #ww = tvtk.XMLStructuredGridWriter(input=sgrid, file_name='sgrid.vts')
    #ww.write()

    return
        data_path = "data"
    except ValueError:
        end_index = -1
        data_path = str(argv[1])
elif len(argv) == 3:
    end_index = int(argv[1])
    data_path = str(argv[2])
else:
    print "Arguments: '[end_index] [data_path]' or '[data_path]'"
    Exit(1)

electron_mass = 9.10938291e-31
ion_mass = 3.34358348e-27

# Collect the data
f = collect("effective_alpha_e", path=data_path, info=True)
#Te = collect("T_electron", path=data_path, xind=2, info=True, yguards=True)
#Ti = collect("T_ion", path=data_path, xind=2, info=True, yguards=True)
#n = collect("n_ion", path="data", xind=2, info=True, yguards=True)

if end_index < 0:
    end_index = len(f[:])

alpha0 = 1
alphaoveralpha0 = []
for i in range(end_index):
    alphaoveralpha0.append(f[i] / alpha0)
    print i, alphaoveralpha0[i]

# Make plot
pyplot.figure(1, facecolor='w')
Example #58
0
# Check command-line arguments
if len(sys.argv) < 2:
    # Print usage information
    print("Usage: {0} path\n e.g. {0} data [zoom]".format(sys.argv[0]))
    sys.exit(1)
    
# First argument is the path
path = sys.argv[1]

if len(sys.argv) > 2:
    # Optional second argument is length from target
    zoomlength = float(sys.argv[2])
else:
    zoomlength = 1.0 # 1m default

t = collect("t_array", path=path)
J = collect("J", path=path)[0,:]
dy = collect("dy", path=path)[0,:]
dV = J*dy # Volume element

tind = len(t)-1 # Get the last time point

var_list = ["PeSource",      # Input pressure source
            "Ne", "P",       # Plasma profiles
            "Srec", "Siz",   # Particle sources / sinks
            "Frec", "Fiz", "Fcx", "Fel",   # Momentum source / sinks to neutrals
            "Rrec", "Riz", "Rzrad", "Rex", # Radiation, energy loss from system
            "E", "Erec", "Eiz", "Ecx", "Eel",   # Energy transfer between neutrals and plasma
            "Nnorm", "Tnorm", "Omega_ci", "rho_s0", "Cs0"]  # Normalisations

########################################################