def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False,
             call_pdb=False):
    import nose, os, sys, yt
    from yt.funcs import mylog
    orig_level = mylog.getEffectiveLevel()
    mylog.setLevel(50)
    nose_argv = sys.argv
    nose_argv += ['--exclude=answer_testing', '--detailed-errors', '--exe']
    if call_pdb:
        nose_argv += ["--pdb", "--pdb-failures"]
    if verbose:
        nose_argv.append('-v')
    if run_answer_tests:
        nose_argv.append('--with-answer-testing')
    if answer_big_data:
        nose_argv.append('--answer-big-data')
    initial_dir = os.getcwd()
    yt_file = os.path.abspath(yt.__file__)
    yt_dir = os.path.dirname(yt_file)
    os.chdir(yt_dir)
    try:
        nose.run(argv=nose_argv)
    finally:
        os.chdir(initial_dir)
        mylog.setLevel(orig_level)
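
A minimal usage sketch for the helper above. Only the function itself comes from the snippet; the driver context is an assumption.

# Hypothetical driver: run the fast test suite verbosely, skipping
# answer tests (flags map onto the nose arguments assembled above).
if __name__ == "__main__":
    run_nose(verbose=True, run_answer_tests=False)
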
import numpy as np
from pathlib import Path
import yt
from yt.funcs import mylog


def ReynoldsDecomp(nfiles, path, basename, fieldname):
    """Decompose a field into its time average and the fluctuation of
    the final snapshot (Reynolds decomposition)."""
    time = np.zeros(nfiles)  # Initialize time array
    for i in range(nfiles):
        # Append the correct (zero-padded, four-digit) index to each file
        file = basename + str(i).zfill(4)
        # Check that the file exists and the path is correct;
        # strict=True is needed for resolve() to raise FileNotFoundError
        try:
            my_abs_path = Path(path + file).resolve(strict=True)
        except FileNotFoundError:
            print("{} file does not exist or path is wrong!\n".format(file))
        else:
            mylog.setLevel(40)
            print("Reading file {}".format(file))
            ds = yt.load(path + file)  # Load the file into the scope
            data = ds.r[fieldname]
            time[i] = ds.current_time
            if i == 0:
                averaged_data = np.zeros(len(data))  # Initialize accumulator
            averaged_data = averaged_data + data
    average = averaged_data / nfiles
    # Fluctuation of the *last* snapshot about the time average
    fluctuations = data - average
    return (average, fluctuations)
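
A hedged example of calling ReynoldsDecomp; the directory, basename, and field name below are illustrative assumptions, not taken from the snippet.

# Illustrative call: snapshots assumed to be named data0000..data0049
avg, fluct = ReynoldsDecomp(nfiles=50, path="./output/",
                            basename="data", fieldname="density")
# `avg` is the time-averaged field; `fluct` is the deviation of the
# final snapshot from that average.
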
def __init__(self, table_type, redshift=0.0, data_dir=None, use_metals=True):
    mylog.setLevel(50)
    filename = _get_data_file(table_type, data_dir=data_dir)
    only_on_root(mylog.info, "Loading emissivity data from %s." % filename)
    in_file = h5py.File(filename, "r")
    if "info" in in_file.attrs:
        only_on_root(mylog.info, parse_h5_attr(in_file, "info"))
    if parse_h5_attr(in_file, "version") != data_version[table_type]:
        raise ObsoleteDataException(table_type)
    else:
        only_on_root(mylog.info,
                     "X-ray '%s' emissivity data version: %s." %
                     (table_type, parse_h5_attr(in_file, "version")))

    self.log_T = in_file["log_T"][:]
    self.emissivity_primordial = in_file["emissivity_primordial"][:]
    if "log_nH" in in_file:
        self.log_nH = in_file["log_nH"][:]
    if use_metals:
        self.emissivity_metals = in_file["emissivity_metals"][:]
    self.ebin = YTArray(in_file["E"], "keV")
    in_file.close()
    self.dE = np.diff(self.ebin)
    self.emid = 0.5 * (self.ebin[1:] + self.ebin[:-1]).to("erg")
    self.redshift = redshift
def getFileName(initfile, finalfile, path, basename):
    nfiles = finalfile - initfile
    files = [None] * nfiles
    for i in range(initfile, finalfile):
        # Append the correct (zero-padded, four-digit) index to each file
        file = basename + str(i).zfill(4)
        # Check that the file exists and the path is correct;
        # strict=True is needed for resolve() to raise FileNotFoundError
        try:
            my_abs_path = Path(path + file).resolve(strict=True)
        except FileNotFoundError:
            print("{} file does not exist or path is wrong!\n".format(file))
        else:
            mylog.setLevel(40)
            # print("Reading file {}".format(file))
            files[i - initfile] = file
    return files
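
For completeness, a hedged example pairing getFileName with yt.load; the directory and basename are made up:

# Build the list of existing snapshot names, then load them (names assumed)
files = getFileName(initfile=0, finalfile=10, path="./output/",
                    basename="data")
for f in files:
    if f is not None:  # entries stay None for missing files
        ds = yt.load("./output/" + f)
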
    # (fragment of an MPI post-processing script; the `if Pid == 0:` guard
    # is reconstructed from the bcast(root=0) pattern below)
    if Pid == 0:  # master rank initializes the global arrays
        #average = pod.FlowAverage(nfiles, path, fieldName, files)
        time = np.zeros(nfiles)
        b = np.zeros(nfiles)
    else:
        files = None
        time = None
        b = None
    p_time = np.zeros(local_nfiles)
    p_b = np.zeros(local_nfiles)
    comm.Barrier()  # Children wait for Master node to finish
    files = comm.bcast(files, root=0)
    for i in range(local_start, local_end):
        # complete the file name with the correct index
        print("Reading file ", files[i], ' with processor ', Pid)
        # Load file
        mylog.setLevel(40)  # Show no yt INFO in command prompt
        ds = yt.load(path + files[i])
        center = ds.domain_left_edge
        sp = ds.sphere(center, r_max)
        rp0 = yt.create_profile(sp, 'radius', fieldName,
                                n_bins=p_res,
                                units={'radius': 'cm',
                                       "density": "kg/m**3",
                                       "specific_volume": "m**3/kg"},
                                logs={'radius': False,
                                      "density": False,
def read_virial(self, num, Rsph_over_Rcl=1.95, prefix='virial',
                savdir=None, force_override=False):
    """Calculate volume integrals of the thermal/magnetic/gravitational
    energy terms in the virial theorem.

    Also calculates the center of mass, half-mass radius of neutral gas, etc.
    """
    # Print no log messages
    from yt.funcs import mylog
    mylog.setLevel(50)

    ds = self.load_vtk(num, load_method='yt')
    da = ds.all_data()

    # Set thresholds
    xCL = 'specific_scalar_CL'
    xCL_min = 1e-2    # Cloud gas if xCL > xCL_min
    xneu_min = 0.5    # Neutral if xHI + 2*xH2 > xneu_min
    xH2_min = 0.25
    x0 = ds.domain_center

    # Volume of a cell (converted to pc**3)
    dV = ((ds.domain_width / ds.domain_dimensions).prod()).to('pc**3')

    # Find indices for cloud and the neutral portion of it
    idx_neu = (da['xHI'] + 2.0*da['xH2'] > xneu_min)
    idx_ion = (da['xHI'] + 2.0*da['xH2'] <= xneu_min)
    idx_cl = da[xCL] > xCL_min
    idx_neu_cl = (da['xHI'] + 2.0*da['xH2'] > xneu_min) & (da[xCL] > xCL_min)
    idx_ion_cl = (da['xHI'] + 2.0*da['xH2'] <= xneu_min) & (da[xCL] > xCL_min)
    idx_H2_cl = (2.0*da['xH2'] > xH2_min) & (da[xCL] > xCL_min)

    # Calculate initial magnetic field
    mhd = self.par['configure']['gas'] != 'hydro'
    if mhd:
        muB = self.par['problem']['muB']
    else:
        muB = np.inf
    M0 = self.par['problem']['M_cloud']
    R0 = self.par['problem']['R_cloud']
    B0mag = 1.3488004135072468e-05 * (M0 * 1e-5) * (20.0/R0)**2 * \
        (2.0/muB) * yu.gauss
    thetaB = self.par['problem']['theta_B0'] * np.pi/180.0
    phiB = self.par['problem']['phi_B0'] * np.pi/180.0
    B0 = B0mag * yt.YTArray([np.sin(thetaB)*np.cos(phiB),
                             np.sin(thetaB)*np.sin(phiB),
                             np.cos(thetaB)])
    print('B0', B0)

    # Save results to a dictionary
    r = dict()
    # r['model'] = self.basename
    r['Rsph_over_Rcl'] = Rsph_over_Rcl
    r['time_code'] = ds.current_time.item()
    r['time'] = ds.current_time.to('Myr')

    # Mass
    r['Mgas_tot'] = (da['density'].sum()*dV).to('Msun')
    r['Mgas_neu'] = (da['density'][idx_neu].sum()*dV).to('Msun')
    r['Mgas_ion'] = (da['density'][idx_ion].sum()*dV).to('Msun')
    r['Mgas_cl'] = (da['density'][idx_cl].sum()*dV).to('Msun')
    r['Mgas_neu_cl'] = (da['density'][idx_neu_cl].sum()*dV).to('Msun')
    r['Mgas_ion_cl'] = (da['density'][idx_ion_cl].sum()*dV).to('Msun')
    r['Mgas_H2_cl'] = (da['density'][idx_H2_cl].sum()*dV).to('Msun')

    # Volume
    r['V_tot'] = ds.domain_width.prod().to('pc**3')
    r['V_neu'] = dV*idx_neu.sum()
    r['V_ion'] = dV*idx_ion.sum()
    r['V_cl'] = dV*idx_cl.sum()
    r['V_neu_cl'] = dV*idx_neu_cl.sum()
    r['V_ion_cl'] = dV*idx_ion_cl.sum()
    r['V_H2_cl'] = dV*idx_H2_cl.sum()

    # Center of mass
    r['xCM'] = yt.YTArray([(da[ax]*da['density']).sum()/da['density'].sum()
                           for ax in ('x', 'y', 'z')]).to('pc')
    r['xCM_cl'] = yt.YTArray(
        [(da[ax][idx_cl]*da['density'][idx_cl]).sum() /
         da['density'][idx_cl].sum() for ax in ('x', 'y', 'z')]).to('pc')
    r['xCM_neu_cl'] = yt.YTArray(
        [(da[ax][idx_neu_cl]*da['density'][idx_neu_cl]).sum() /
         da['density'][idx_neu_cl].sum() for ax in ('x', 'y', 'z')]).to('pc')
    r['xCM_ion_cl'] = yt.YTArray(
        [(da[ax][idx_ion_cl]*da['density'][idx_ion_cl]).sum() /
         da['density'][idx_ion_cl].sum() for ax in ('x', 'y', 'z')]).to('pc')

    # Add fields
    ds = self.add_fields_virial(ds, mhd, x0=x0, xCM_neu_cl=r['xCM_neu_cl'])

    # Calculate the radius that encloses xx% of the neutral cloud mass
    # (distance measured from xCM_neu_cl) and the corresponding free-fall time
    M_encl_tot = r['Mgas_neu_cl']
    dist_neu_cl = da['dist_neu_cl'][idx_neu_cl]
    idx_srt = np.argsort(dist_neu_cl)
    dist_neu_cl_srt = dist_neu_cl[idx_srt]
    M_encl = (da['density'][idx_neu_cl][idx_srt].cumsum()*dV).to('Msun')
    percentage = [50, 67, 90, 95]
    for p in percentage:
        idx = np.where(M_encl/M_encl_tot > p*1e-2)[0]
        r['R{0:d}'.format(p)] = dist_neu_cl_srt[idx[0]]
        r['rho{0:d}'.format(p)] = p*1e-2*M_encl_tot / \
            (4.0*np.pi*r['R{0:d}'.format(p)]**3/3.0)
        r['tff{0:d}'.format(p)] = np.sqrt(
            3.0*np.pi/(32.0*yu.G*r['rho{0:d}'.format(p)])).to('Myr')

    r['Ekin_neu_cl'] = (0.5*da['density'][idx_neu_cl] *
                        da['velocity_magnitude'][idx_neu_cl]**2 *
                        dV).sum().to('erg')
    r['Ekin_ion_cl'] = (0.5*da['density'][idx_ion_cl] *
                        da['velocity_magnitude'][idx_ion_cl]**2 *
                        dV).sum().to('erg')
    r['vdisp_neu_cl'] = np.sqrt(2.0*r['Ekin_neu_cl']/r['Mgas_neu_cl']).to('km/s')
    r['vdisp_ion_cl'] = np.sqrt(2.0*r['Ekin_ion_cl']/r['Mgas_ion_cl']).to('km/s')

    # Velocity dispersion within spheres of radii R50, R67, R90, R95
    for p in percentage:
        sph_p = ds.sphere(r['xCM_neu_cl'],
                          (r['R{0:d}'.format(p)].value.item(), "pc"))
        idx_p = (sph_p['xHI'] + 2.0*sph_p['xH2'] > xneu_min) & \
                (sph_p[xCL] > xCL_min)
        for ax in ('x', 'y', 'z'):
            r[f'vmean_{ax}_neu_cl_{p}'] = \
                ((sph_p['density']*sph_p[f'velocity_{ax}'])[idx_p].sum() /
                 (sph_p['density'][idx_p]).sum()).to('km/s')
            r[f'vrms_{ax}_neu_cl_{p}'] = \
                (sph_p['density'][idx_p] *
                 ((sph_p[f'velocity_{ax}'])[idx_p] -
                  r[f'vmean_{ax}_neu_cl_{p}'])**2).sum() / \
                (sph_p['density'][idx_p].sum())
            r[f'vrms_{ax}_neu_cl_{p}'] = np.sqrt(
                r[f'vrms_{ax}_neu_cl_{p}']).to('km/s')

    for ax in ('x', 'y', 'z'):
        r[f'vmean_{ax}_neu_cl'] = (
            (da['density']*da[f'velocity_{ax}'])[idx_neu_cl].sum() /
            (da['density'][idx_neu_cl]).sum()).to('km/s')
        r[f'vrms_{ax}_neu_cl'] = \
            (da['density'][idx_neu_cl] *
             ((da[f'velocity_{ax}'])[idx_neu_cl] -
              r[f'vmean_{ax}_neu_cl'])**2).sum() / \
            (da['density'][idx_neu_cl].sum())
        r[f'vrms_{ax}_neu_cl'] = np.sqrt(r[f'vrms_{ax}_neu_cl']).to('km/s')

    Rcl = self.par['problem']['R_cloud']
    sph_ = ds.sphere(x0, (1.99*Rcl, "pc"))
    surf = ds.surface(sph_, "dist", (Rsph_over_Rcl*Rcl, "pc"))
    sph = ds.sphere(x0, (Rsph_over_Rcl*Rcl, "pc"))

    # These are the coordinates of all triangle vertices in the surface
    triangles = surf.triangles
    # Construct triangle normal vectors
    w1 = surf.triangles[:, 1] - surf.triangles[:, 0]
    w2 = surf.triangles[:, 1] - surf.triangles[:, 2]
    # Calculate triangle areas
    vector_area = np.cross(w1, w2)/2*(yu.pc**2).to('cm**2')
    scalar_area = np.linalg.norm(vector_area, axis=1)
    surf_area = scalar_area.sum()

    # idx for neutral cloud within sphere
    idx = (sph['xHI'] + 2.0*sph['xH2'] > xneu_min) & (sph[xCL] > xCL_min)

    # Surface integrands (rdotTM is only defined for MHD runs)
    if mhd:
        rdotTM = np.column_stack([surf['rdotTM_{0:s}'.format(x)]
                                  for x in 'xyz'])*(yu.erg/yu.cm**2)
    rdotPi = np.column_stack([surf['rdotPi_{0:s}'.format(x)]
                              for x in 'xyz'])*(yu.erg/yu.cm**2)
    rdotPi_thm = np.column_stack([surf['rdotPi_thm_{0:s}'.format(x)]
                                  for x in 'xyz'])*(yu.erg/yu.cm**2)
    rhovrsq = np.column_stack([surf['rhovrsq_{0:s}'.format(x)]
                               for x in 'xyz'])*(yu.g/yu.s)

    r['Mgas_sph'] = ((sph['cell_volume']*sph['density']).sum()).to('Msun')
    r['Mgas_neu_cl_sph'] = ((sph['cell_volume'][idx] *
                             sph['density'][idx]).sum()).to('Msun')
    r['I_E'] = ((sph['cell_volume']*sph['rho_rsq']).sum()).to('g*cm**2')
    r['I_E_neu_cl'] = ((sph['cell_volume'][idx] *
                        sph['rho_rsq'][idx]).sum()).to('g*cm**2')
    r['V_neu_cl_sph'] = ((sph['cell_volume'][idx]).sum()).to('pc**3')

    # Effective radius
    # Incorrect
    r['R_rms'] = np.sqrt(5.0/3.0*r['I_E']/r['Mgas_sph']).to('pc')
    r['R_rms_neu_cl'] = np.sqrt(5.0/3.0*r['I_E_neu_cl'] /
                                r['Mgas_neu_cl_sph']).to('pc')

    # Flux of moment of inertia
    r['S_surf_sph'] = 0.5*np.sum(vector_area*rhovrsq)
    # Surface integral of Reynolds stress
    r['T_surf_sph'] = 0.5*np.sum(vector_area*rdotPi)
    # Surface integral of Reynolds stress (thermal pressure term only)
    r['T_surf_thm_sph'] = 0.5*np.sum(vector_area*rdotPi_thm)

    # Thermal + kinetic energy (for sphere)
    r['T_thm_sph'] = 1.5*((sph['cell_volume']*sph['pressure']).sum()).to('erg')
    r['T_kin_sph'] = 0.5*((sph['cell_volume']*sph['density'] *
                           sph['velocity_magnitude']**2).sum()).to('erg')
    r['T_thm_neu_cl_sph'] = 1.5*((sph['cell_volume'][idx] *
                                  sph['pressure'][idx]).sum()).to('erg')
    r['T_kin_neu_cl_sph'] = 0.5*((sph['cell_volume'][idx] *
                                  sph['density'][idx] *
                                  sph['velocity_magnitude'][idx]**2).sum()).to('erg')

    # Thermal + kinetic energy (for entire volume)
    r['T_thm'] = 1.5*((da['cell_volume']*da['pressure']).sum()).to('erg')
    r['T_kin'] = 0.5*((da['cell_volume']*da['density'] *
                       da['velocity_magnitude']**2).sum()).to('erg')
    r['T_thm_neu_cl'] = 1.5*((da['cell_volume'][idx_neu_cl] *
                              da['pressure'][idx_neu_cl]).sum()).to('erg')
    r['T_kin_neu_cl'] = 0.5*((da['cell_volume'][idx_neu_cl] *
                              da['density'][idx_neu_cl] *
                              da['velocity_magnitude'][idx_neu_cl]**2).sum()).to('erg')

    # Surface integral of Maxwell stress
    if mhd:
        r['M_surf_sph'] = np.sum(vector_area*rdotTM)
        # Alternative way
        # r['M_surf_'] = surf.calculate_flux('rdotTM_x', 'rdotTM_y',
        #                                    'rdotTM_z', fluxing_field="ones")
        # Volume integral of magnetic energy density
        r['M_vol_sph'] = (sph['cell_volume'] *
                          sph['magnetic_energy']).sum().to('erg')
        r['M_sph'] = r['M_vol_sph'] + r['M_surf_sph']
        # Define average magnetic field on surface
        r['B_surf_avg_x'] = (surf['magnetic_field_x']*scalar_area).sum()/surf_area
        r['B_surf_avg_y'] = (surf['magnetic_field_y']*scalar_area).sum()/surf_area
        r['B_surf_avg_z'] = (surf['magnetic_field_z']*scalar_area).sum()/surf_area
        r['B_surf_avg_mag'] = np.sqrt(r['B_surf_avg_x']**2 +
                                      r['B_surf_avg_y']**2 +
                                      r['B_surf_avg_z']**2)
        # Alternative method to calculate surface-averaged B-field magnitude:
        # Bmag^2 = -6*M_surf/R_sph^3
        r['B_surf_avg_mag_alt'] = (np.sqrt(
            -6.0*r['M_surf_sph'] /
            ((r['Rsph_over_Rcl']*Rcl*yu.pc)**3))).to('gauss')
        # Magnetic energy as obtained by volume integral of B^2 - B_avg^2,
        # using B_avg = B0. This is incorrect because B_avg changes with time
        r['M_neu_cl0_sph'] = (
            ((sph['magnetic_field_magnitude'][idx]**2 - B0mag**2) *
             sph['cell_volume'][idx]).sum()).to('erg')/(8.0*np.pi)
        # This will be our fiducial choice
        r['M_neu_cl_sph'] = (
            ((sph['magnetic_field_magnitude'][idx]**2 -
              r['B_surf_avg_mag']**2) *
             sph['cell_volume'][idx]).sum()).to('erg')/(8.0*np.pi)
        # Alternative
        r['M_neu_cl_sph_alt'] = (
            ((sph['magnetic_field_magnitude'][idx]**2 -
              r['B_surf_avg_mag_alt']**2) *
             sph['cell_volume'][idx]).sum()).to('erg')/(8.0*np.pi)
        r['M_neu_cl0'] = (
            ((da['magnetic_field_magnitude'][idx_neu_cl]**2 - B0mag**2) *
             da['cell_volume'][idx_neu_cl]).sum()).to('erg')/(8.0*np.pi)
        r['M_neu_cl'] = (
            ((da['magnetic_field_magnitude'][idx_neu_cl]**2 -
              r['B_surf_avg_mag']**2) *
             da['cell_volume'][idx_neu_cl]).sum()).to('erg')/(8.0*np.pi)
        r['M_neu_cl_alt'] = (
            ((da['magnetic_field_magnitude'][idx_neu_cl]**2 -
              r['B_surf_avg_mag_alt']**2) *
             da['cell_volume'][idx_neu_cl]).sum()).to('erg')/(8.0*np.pi)
    else:
        r['M_surf_sph'] = 0.0
        r['M_vol_sph'] = 0.0
        r['M_sph'] = 0.0
        r['B_surf_avg_x'] = 0.0
        r['B_surf_avg_y'] = 0.0
        r['B_surf_avg_z'] = 0.0
        r['B_surf_avg_mag'] = 0.0
        r['B_surf_avg_mag_alt'] = 0.0
        r['M_neu_cl0_sph'] = 0.0
        r['M_neu_cl_sph'] = 0.0
        r['M_neu_cl_sph_alt'] = 0.0
        r['M_neu_cl0'] = 0.0
        r['M_neu_cl'] = 0.0
        r['M_neu_cl_alt'] = 0.0

    # Gravitational term (gravitational energy in the absence of an
    # external potential)
    r['W_sph'] = (sph['density'] *
                  (sph['x']*sph['gravitational_potential_gradient_x'] +
                   sph['y']*sph['gravitational_potential_gradient_y'] +
                   sph['z']*sph['gravitational_potential_gradient_z']) *
                  sph['cell_volume']).sum().to('erg')
    r['W_neu_cl_sph'] = (sph['density'][idx] *
                         (sph['x'][idx]*sph['gravitational_potential_gradient_x'][idx] +
                          sph['y'][idx]*sph['gravitational_potential_gradient_y'][idx] +
                          sph['z'][idx]*sph['gravitational_potential_gradient_z'][idx]) *
                         sph['cell_volume'][idx]).sum().to('erg')
    r['W'] = (da['density'] *
              (da['x']*da['gravitational_potential_gradient_x'] +
               da['y']*da['gravitational_potential_gradient_y'] +
               da['z']*da['gravitational_potential_gradient_z']) *
              da['cell_volume']).sum().to('erg')
    r['W_neu_cl'] = (da['density'][idx_neu_cl] *
                     (da['x'][idx_neu_cl]*da['gravitational_potential_gradient_x'][idx_neu_cl] +
                      da['y'][idx_neu_cl]*da['gravitational_potential_gradient_y'][idx_neu_cl] +
                      da['z'][idx_neu_cl]*da['gravitational_potential_gradient_z'][idx_neu_cl]) *
                     da['cell_volume'][idx_neu_cl]).sum().to('erg')

    return r
import sys
import yt
from yt.funcs import mylog
mylog.setLevel(50)
import numpy as np
from yt import derived_field
import matplotlib.pyplot as plt
from yt.units import yr, Myr, pc, gram, second
from scipy.interpolate import interp1d
from yt.data_objects.particle_filters import add_particle_filter
from yt.fields.derived_field import \
    ValidateGridType, \
    ValidateParameter, \
    ValidateSpatial, \
    NeedsParameter
from astropy.table import Table, Column


def _Disk_H(field, data):
    center = data.get_field_parameter('center')
    z = data["z"] - center[2]
    return np.abs(z)

yt.add_field("Disk_H", function=_Disk_H, units="pc", take_log=False,
             validators=[ValidateParameter('center')])


def _radial_velocity(field, data):
    if data.has_field_parameter("bulk_velocity"):
        bv = data.get_field_parameter("bulk_velocity").in_units("cm/s")
      '---Processor---\n')
comm.Barrier()
t_start = MPI.Wtime()  # Start run time measurement
for i in range(local_start, local_end):
    # complete the file name with the correct (zero-padded, four-digit) index
    file = fname + str(i).zfill(4)
    # Load file
    mylog.setLevel(40)  # Suppress yt INFO output in the command prompt
    ds = yt.load(file)
    #ad = ds.all_data()
    #fd = ad['gas', fieldName]
    print(' ', file, '\t\t', float(ds.current_time)*1000, '\t\t', Pid)
    rad = 15.0
    res = 2048
    xmix = np.array([])
    ymix = np.array([])
    rmix = np.array([])
    mylog.setLevel(40)  # Hide yt log output in the command line
    for j in range(0, 90):
        # Loop over the angle of the line from 0 to 90 degrees
        xp = rad * np.cos(
def plot_kin_e():
    # set ytlog.setLevel(40) to suppress logging up to that level
    # set ytlog.setLevel(20) to enable more logging
    ytlog.setLevel(40)

    # load data as a time series of individual data sets
    # would yt recognise our data as 2d? override_geometry does not seem to work
    ts = yt.load(data_dir + basename + '????.dat')

    # initialise arrays
    times = []
    E_kin = []
    Palinstrophy = []
    Enstrophy = []
    grid_level = 2  # maximal refinement level used

    # add field: kinetic energy per cell
    # the cell volume is the important weight, so that data with different
    # mesh sizes can be compared
    # TODO yt does not recognise our data as 2d
    def _kinetic_energy_per_cell(field, data):
        return data["kinetic_energy_density"] * data["cell_volume"]
    yt.add_field(("gas", "_kinetic_energy_per_cell"),
                 function=_kinetic_energy_per_cell,
                 units='g*cm**2/s**2')

    # add field: squared vorticity (for the enstrophy integral)
    # TODO dimensionless?
    def _quad_omega(field, data):
        return data["omega"]**2
    yt.add_field(("amrvac", "_quad_omega"), function=_quad_omega,
                 units='dimensionless')

    # add field: squared vorticity gradient (for the palinstrophy integral)
    # TODO dimensionless?
    def _norm_grad_omega(field, data):
        return data["grad_omega_x_"]**2 + data["grad_omega_y_"]**2
    yt.add_field(("gas", "_norm_grad_omega"), function=_norm_grad_omega,
                 units='dimensionless')

    # compute kinetic energy, enstrophy, and palinstrophy for each time step
    for ds in ts:
        dd = ds.all_data()
        # TODO use smoothed_covering_grid() for top-down regridding of the
        # data, or find out whether dd.max_level uses interpolation
        dd.max_level = grid_level
        times.append(ds.current_time.in_units("s"))
        E_kin.append(dd.quantities.total_quantity(
            [("gas", "_kinetic_energy_per_cell")]))
        Enstrophy.append(
            0.5 * dd.quantities.total_quantity([("amrvac", "_quad_omega")]))
        Palinstrophy.append(
            0.5 * dd.quantities.total_quantity([("gas", "_norm_grad_omega")]))
    E_kin = np.array(E_kin)
    Palinstrophy = np.array(Palinstrophy)

    # plot data
    fig, axs = plt.subplots(3)
    axs[0].plot(times, E_kin)
    axs[0].set(xlabel='Time (s)',
               ylabel=r'Kinetic energy ($g\cdot cm^2/s^2$)')
    axs[1].plot(times, Enstrophy)
    axs[1].set(xlabel='Time (s)', ylabel='Enstrophy')
    axs[2].plot(times, Palinstrophy)
    axs[2].set(xlabel='Time (s)', ylabel='Palinstrophy')
    fig.show()
    fig.savefig(out_dir + 'kinetic_energy_' + basename + '.pdf')
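
plot_kin_e relies on module-level globals (ytlog, data_dir, basename, out_dir). A minimal sketch of that surrounding setup, with every value here an illustrative assumption:

# Assumed module-level setup for plot_kin_e (all values illustrative)
import yt
import numpy as np
import matplotlib.pyplot as plt
from yt.funcs import mylog as ytlog  # the snippet's `ytlog` alias

data_dir = './output/'   # directory containing the AMRVAC .dat files
basename = 'kh2d_'       # common prefix of the snapshot files
out_dir = './plots/'     # where the PDF is written

plot_kin_e()
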
def __init__(self, outputs, indices, fields=None, suppress_logging=False,
             ptype=None):
    indices.sort()  # Just in case the caller wasn't careful
    self.field_data = YTFieldData()
    self.data_series = outputs
    self.masks = []
    self.sorts = []
    self.array_indices = []
    self.indices = indices
    self.num_indices = len(indices)
    self.num_steps = len(outputs)
    self.times = []
    self.suppress_logging = suppress_logging
    self.ptype = ptype

    if fields is None:
        fields = []
    fields = list(OrderedDict.fromkeys(fields))

    if self.suppress_logging:
        old_level = int(ytcfg.get("yt", "loglevel"))
        mylog.setLevel(40)

    ds_first = self.data_series[0]
    dd_first = ds_first.all_data()

    fds = {}
    for field in (
        "particle_index",
        "particle_position_x",
        "particle_position_y",
        "particle_position_z",
    ):
        fds[field] = self._get_full_field_name(field)[0]

    my_storage = {}
    pbar = get_pbar("Constructing trajectory information",
                    len(self.data_series))
    for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
        dd = ds.all_data()
        newtags = dd[fds["particle_index"]].d.astype("int64")
        mask = np.in1d(newtags, indices, assume_unique=True)
        sort = np.argsort(newtags[mask])
        array_indices = np.where(
            np.in1d(indices, newtags, assume_unique=True))[0]
        self.array_indices.append(array_indices)
        self.masks.append(mask)
        self.sorts.append(sort)

        pfields = {}
        for field in (f"particle_position_{ax}" for ax in "xyz"):
            pfields[field] = dd[fds[field]].ndarray_view()[mask][sort]

        sto.result_id = ds.parameter_filename
        sto.result = (ds.current_time, array_indices, pfields)
        pbar.update(i)
    pbar.finish()

    if self.suppress_logging:
        mylog.setLevel(old_level)

    sorted_storage = sorted(my_storage.items())
    times = [time for _fn, (time, *_) in sorted_storage]
    self.times = self.data_series[0].arr(times, times[0].units)

    self.particle_fields = []
    output_field = np.empty((self.num_indices, self.num_steps))
    output_field.fill(np.nan)
    for field in (f"particle_position_{ax}" for ax in "xyz"):
        for i, (_fn, (_time, indices, pfields)) in enumerate(sorted_storage):
            try:
                # This will fail if particle ids are duplicated, because
                # the rhs would then have a different shape than the lhs
                output_field[indices, i] = pfields[field]
            except ValueError as e:
                raise YTIllDefinedParticleData(
                    "This dataset contains duplicate particle indices!"
                ) from e
        self.field_data[field] = array_like_field(dd_first,
                                                  output_field.copy(),
                                                  fds[field])
        self.particle_fields.append(field)

    # Instantiate fields the caller requested
    self._get_data(fields)
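
This constructor matches yt's ParticleTrajectories; in user code it is usually reached through DatasetSeries.particle_trajectories. A hedged usage sketch (the dataset paths are made up):

import yt

# Load a time series of outputs (glob pattern is illustrative)
ts = yt.load("DD????/output_????")
# Follow three particles by index across all outputs;
# suppress_logging=True triggers the mylog.setLevel(40) path above.
trajs = ts.particle_trajectories([1, 10, 100],
                                 fields=["particle_velocity_x"],
                                 suppress_logging=True)
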
def __init__(self, outputs, indices, fields=None, suppress_logging=False):
    indices.sort()  # Just in case the caller wasn't careful
    self.field_data = YTFieldData()
    self.data_series = outputs
    self.masks = []
    self.sorts = []
    self.array_indices = []
    self.indices = indices
    self.num_indices = len(indices)
    self.num_steps = len(outputs)
    self.times = []
    self.suppress_logging = suppress_logging

    if fields is None:
        fields = []
    fields = list(OrderedDict.fromkeys(fields))

    if self.suppress_logging:
        old_level = int(ytcfg.get("yt", "loglevel"))
        mylog.setLevel(40)

    fds = {}
    ds_first = self.data_series[0]
    dd_first = ds_first.all_data()
    idx_field = dd_first._determine_fields("particle_index")[0]
    for field in ("particle_position_%s" % ax for ax in "xyz"):
        fds[field] = dd_first._determine_fields(field)[0]

    my_storage = {}
    pbar = get_pbar("Constructing trajectory information",
                    len(self.data_series))
    for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
        dd = ds.all_data()
        newtags = dd[idx_field].d.astype("int64")
        mask = np.in1d(newtags, indices, assume_unique=True)
        sort = np.argsort(newtags[mask])
        array_indices = np.where(
            np.in1d(indices, newtags, assume_unique=True))[0]
        self.array_indices.append(array_indices)
        self.masks.append(mask)
        self.sorts.append(sort)
        pfields = {}
        for field in ("particle_position_%s" % ax for ax in "xyz"):
            pfields[field] = dd[fds[field]].ndarray_view()[mask][sort]
        sto.result_id = ds.parameter_filename
        sto.result = (ds.current_time, array_indices, pfields)
        pbar.update(i)
    pbar.finish()

    if self.suppress_logging:
        mylog.setLevel(old_level)

    times = []
    for fn, (time, indices, pfields) in sorted(my_storage.items()):
        times.append(time)
    self.times = self.data_series[0].arr([time for time in times],
                                         times[0].units)

    self.particle_fields = []
    output_field = np.empty((self.num_indices, self.num_steps))
    output_field.fill(np.nan)
    for field in ("particle_position_%s" % ax for ax in "xyz"):
        for i, (fn, (time, indices, pfields)) in \
                enumerate(sorted(my_storage.items())):
            output_field[indices, i] = pfields[field]
        self.field_data[field] = array_like_field(dd_first,
                                                  output_field.copy(),
                                                  fds[field])
        self.particle_fields.append(field)

    # Instantiate fields the caller requested
    self._get_data(fields)
import matplotlib as mpl
mpl.use('Agg')
import yt
from yt import YTArray
import trident
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
import time
from mpi4py import MPI
import os
import shutil
from yt.funcs import mylog
mylog.setLevel(40)  # only log errors and critical problems

comm = MPI.COMM_WORLD

t_start = time.time()

# number of pixels per dimension (for Aitoff projection)
pixels_per_dim = 512

remove_first_N_kpc = 1.0

# Load dataset
fn = "/mnt/scratch/dsilvia/simulations/reu_sims/MW_1638kpcBox_800pcCGM_200pcDisk_lowres/DD1500/DD1500"
ds = yt.load(fn)

# Add H I & O VI ion fields using Trident
# Script to pull out the list of column densities within the center of the
# simulations, centered on the cloud. These are then written to a file to be
# used to calculate the cumulative distribution of the column densities.

import yt
import numpy as np
import trident as tri
import h5py
from massFracs import *
import pandas as pd
from yt.units import centimeter, gram, second, Kelvin, erg
from yt.funcs import mylog
mylog.setLevel(50)  # This sets the log level to "CRITICAL"

kpc = 3.086e+21 * centimeter
c_speed = 3.0e10  # cm/s
mp = 1.6726e-24 * gram  # grams
kb = 1.3806e-16 * erg / Kelvin  # ergs/K


# add metallicity to dataset, constant Z = 1 Zsun
def _metallicity(field, data):
    v = data['ones']  # sets metallicity to 1 Zsun
    return data.apply_units(v, "Zsun")


# Read in a spectrum, convert to velocity, and return
# wavelength and flux numpy arrays
def convert_to_vel(filename, rest_wave):
"""
-- Bug persists even if we do the cut one-by-one. We modified yt's
clump_handling.py to fix this.

last mod: 23 July 2018
"""

import sys
assert 'yt' in sys.path[1]  # use the local yt checkout instead of the anaconda version

import yt
import os
from yt.funcs import mylog
# This sets the log level to "ERROR" (level 40),
# http://yt-project.org/doc/faq/index.html
mylog.setLevel(40)
import numpy as np

from clump_modules.clump_wrapper import ytclumpfind_H2, get_phyprop_of_leaf
from io_modules.manipulate_fetch_gal_fields import (import_fetch_gal,
                                                    import_fetch_stars,
                                                    prepare_unigrid,
                                                    check_hist_h2,
                                                    check_power)
from plot_modules.plot_cloud_prop import setup_plot
setup_plot()

outdir = 'test_brute/'
if not os.path.isdir(outdir):
    os.mkdir(outdir)

field_select = "h2density"
#!/usr/bin/env python
import yt
from yt.funcs import mylog
import yaml
import numpy as np
import psutil

# Wavenumber to load?
number = '0'
# Is it partial?
partial = False

numcpus = psutil.cpu_count(logical=False)
mylog.setLevel(40)

# Load configuration data
with open('wafer.yaml') as config_file:
    config = yaml.safe_load(config_file)

num = config['grid']['size']
dn = config['grid']['dn']
dt = config['grid']['dt']

x = (dn*num['x'] - dn)/2
y = (dn*num['y'] - dn)/2
z = (dn*num['z'] - dn)/2

# Load potential data
def prepare_unigrid(data, regionsize_kpc=7., verbose=False, add_unit=False,
                    debug=False):
    import yt

    if not verbose:
        from yt.funcs import mylog
        mylog.setLevel(40)

    if add_unit:
        # see http://yt-project.org/doc/examining/generic_array_data.html
        # for reference
        from astropy import constants as cc
        mp = cc.m_p.cgs.value

        # reminder: the region size should be read from the camera
        bbox_lim = regionsize_kpc / 2.
        bbox = np.array([[-bbox_lim, bbox_lim],
                         [-bbox_lim, bbox_lim],
                         [-bbox_lim, bbox_lim]])
        shape_data = data['density'].shape

        # data should be added with proper units here
        # (maybe get_units from pymses can be used)
        print(data.keys())
        data = dict(density=(mp * data['density'], "g/cm**3"),
                    h2density=(data["density"] * data["H2"], "1/cm**3"),
                    P=(data['P'], "K/cm**3"),
                    P_nt=(data['P_nt'], "K/cm**3"),
                    Z=(data['Z'], ""),
                    velx=(data['velx'], "km/s"),
                    vely=(data['vely'], "km/s"),
                    velz=(data['velz'], "km/s"))
        ds = yt.load_uniform_grid(data, shape_data, length_unit='kpc',
                                  bbox=bbox)
    else:
        ds = yt.load_uniform_grid(data, data["density"].shape)

    def _h2density(field, data):
        try:
            return data["density"] * data["H2"]
        except Exception:
            return data[("stream", "density")] * data[("stream", "H2")]

    # unit is in 1/cc only if convert_unit is properly called when loading
    # in the data
    ds.add_field(("stream", "h2density"), function=_h2density,
                 units="g/cm**3")
    dd = ds.all_data()

    if debug:
        prj = yt.ProjectionPlot(ds, 0, 'h2density', center='c',
                                weight_field='density')
        prj.set_unit('h2density', '1/cm**3')
        prj.save('test_h2density_yt_unit_plot.png')
        print('dump to test_h2density_yt_unit_plot.png')

        prj = yt.ProjectionPlot(ds, 0, 'density', center='c',
                                weight_field='density')
        prj.set_unit('density', 'Msun/pc**3')
        prj.save('test_density_yt_unit_plot.png')
        print('dump to test_density_yt_unit_plot.png')

        print(dd['h2density'].max(), dd['h2density'].min())
        print(dd['density'].max(), dd['density'].min())

    return ds, dd
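
A sketch of calling prepare_unigrid with a synthetic data dict; the keys follow what the function reads, and the random arrays are placeholders:

import numpy as np

# Synthetic inputs with the keys prepare_unigrid expects (values are dummies)
shape = (32, 32, 32)
data = {k: np.random.random(shape)
        for k in ('density', 'H2', 'P', 'P_nt', 'Z',
                  'velx', 'vely', 'velz')}
ds, dd = prepare_unigrid(data, regionsize_kpc=7., add_unit=True)
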
def read_virial2(self, num, Rsph_over_Rcl=1.95, xCL_min=1.0e-2,
                 prefix='virial2', savdir=None, force_override=False):
    """Calculate volume integrals of the thermal/magnetic/gravitational
    energy terms in the virial theorem.

    Also calculates the center of mass, half-mass radius of neutral gas, etc.
    """
    # Print no log messages
    from yt.funcs import mylog
    mylog.setLevel(50)

    ds = self.load_vtk(num, load_method='yt')
    da = ds.all_data()

    # Set thresholds
    xCL = 'specific_scalar_CL'
    xneu_min = 0.5  # Neutral if xHI + 2*xH2 > xneu_min
    x0 = ds.domain_center

    # Volume of a cell (converted to pc**3)
    dV = ((ds.domain_width/ds.domain_dimensions).prod()).to('pc**3')

    # Find indices for cloud and the neutral portion of it
    idx_neu = (da['xHI'] + 2.0*da['xH2'] > xneu_min)
    idx_neu_cl = (da['xHI'] + 2.0*da['xH2'] > xneu_min) & (da[xCL] > xCL_min)

    # Calculate initial magnetic field
    mhd = self.par['configure']['gas'] != 'hydro'
    if mhd:
        muB = self.par['problem']['muB']
    else:
        muB = np.inf
    M0 = self.par['problem']['M_cloud']
    R0 = self.par['problem']['R_cloud']
    B0mag = 1.3488004135072468e-05*(M0/1e5)*(20.0/R0)**2*(2.0/muB)*yu.gauss
    thetaB = self.par['problem']['theta_B0']*np.pi/180.0
    phiB = self.par['problem']['phi_B0']*np.pi/180.0
    B0 = B0mag*yt.YTArray([np.sin(thetaB)*np.cos(phiB),
                           np.sin(thetaB)*np.sin(phiB),
                           np.cos(thetaB)])

    # Save results to a dictionary
    r = dict()
    r['xCL_min'] = xCL_min
    r['Rsph_over_Rcl'] = Rsph_over_Rcl
    r['time_code'] = ds.current_time.item()
    r['time'] = ds.current_time.to('Myr')
    r['B0'] = B0

    # Center of mass
    r['xCM'] = yt.YTArray(
        [(da[ax][idx_neu_cl]*da['density'][idx_neu_cl]).sum() /
         da['density'][idx_neu_cl].sum() for ax in ('x', 'y', 'z')]).to('pc')

    # Add fields
    ds = self.add_fields_virial2(ds, mhd, x0=x0, xCM_neu_cl=r['xCM'])

    Rcl = self.par['problem']['R_cloud']
    sph_ = ds.sphere(x0, (1.99*Rcl, "pc"))
    surf = ds.surface(sph_, "dist", (Rsph_over_Rcl*Rcl, "pc"))
    sph = ds.sphere(x0, (Rsph_over_Rcl*Rcl, "pc"))

    # These are the coordinates of all triangle vertices in the surface
    triangles = surf.triangles
    # Construct triangle normal vectors
    w1 = surf.triangles[:, 1] - surf.triangles[:, 0]
    w2 = surf.triangles[:, 1] - surf.triangles[:, 2]
    # Calculate triangle areas
    vector_area = np.cross(w1, w2)/2*(yu.pc**2).to('cm**2')
    scalar_area = np.linalg.norm(vector_area, axis=1)
    surf_area = scalar_area.sum()

    # idx for neutral cloud within sphere
    idx = (sph['xHI'] + 2.0*sph['xH2'] > xneu_min) & (sph[xCL] > xCL_min)

    # Surface integrands (rdotTM is only defined for MHD runs)
    if mhd:
        rdotTM = np.column_stack([surf['rdotTM_{0:s}'.format(x)]
                                  for x in 'xyz'])*(yu.erg/yu.cm**2)
    rdotPi_kin = np.column_stack([surf['rdotPi_kin_{0:s}'.format(x)]
                                  for x in 'xyz'])*(yu.erg/yu.cm**2)
    rdotPi_thm = np.column_stack([surf['rdotPi_thm_{0:s}'.format(x)]
                                  for x in 'xyz'])*(yu.erg/yu.cm**2)
    rhovrsq = np.column_stack([surf['rhovrsq_{0:s}'.format(x)]
                               for x in 'xyz'])*(yu.g/yu.s)

    # Velocity dispersion
    for ax in ('x', 'y', 'z'):
        r[f'vmean_{ax}_cl'] = (
            (sph['density']*sph[f'velocity_{ax}'])[idx].sum() /
            (sph['density'][idx]).sum()).to('km/s')
        r[f'vrms_{ax}_cl'] = \
            (sph['density'][idx]*((sph[f'velocity_{ax}'])[idx] -
                                  r[f'vmean_{ax}_cl'])**2).sum() / \
            (sph['density'][idx].sum())
        r[f'vrms_{ax}_cl'] = np.sqrt(r[f'vrms_{ax}_cl']).to('km/s')

        r[f'vmean_{ax}_cl_all'] = (
            (da['density']*da[f'velocity_{ax}'])[idx_neu_cl].sum() /
            (da['density'][idx_neu_cl]).sum()).to('km/s')
        r[f'vrms_{ax}_cl_all'] = \
            (da['density'][idx_neu_cl]*((da[f'velocity_{ax}'])[idx_neu_cl] -
                                        r[f'vmean_{ax}_cl_all'])**2).sum() / \
            (da['density'][idx_neu_cl].sum())
        r[f'vrms_{ax}_cl_all'] = np.sqrt(r[f'vrms_{ax}_cl_all']).to('km/s')

    # (Neutral cloud) mass within sphere
    r['Mgas'] = ((sph['cell_volume']*sph['density']).sum()).to('Msun')
    r['Mgas_cl'] = ((sph['cell_volume'][idx]*sph['density'][idx]).sum()).to('Msun')
    r['Mgas_all'] = ((da['cell_volume']*da['density']).sum()).to('Msun')
    r['Mgas_cl_all'] = ((da['cell_volume'][idx_neu_cl] *
                         da['density'][idx_neu_cl]).sum()).to('Msun')

    # Moment of inertia
    r['I_E'] = ((sph['cell_volume']*sph['rho_rsq']).sum()).to('g*cm**2')
    r['I_E_cl'] = ((sph['cell_volume'][idx]*sph['rho_rsq'][idx]).sum()).to('g*cm**2')
    r['I_E_all'] = ((da['cell_volume']*da['rho_rsq']).sum()).to('g*cm**2')
    r['I_E_cl_all'] = ((da['cell_volume'][idx_neu_cl] *
                        da['rho_rsq'][idx_neu_cl]).sum()).to('g*cm**2')

    # Volume
    r['V'] = ((sph['cell_volume']).sum()).to('pc**3')
    r['V_cl'] = ((sph['cell_volume'][idx]).sum()).to('pc**3')
    r['V_all'] = ((da['cell_volume']).sum()).to('pc**3')
    r['V_cl_all'] = ((da['cell_volume'][idx_neu_cl]).sum()).to('pc**3')

    # Effective radius
    r['R_rms'] = np.sqrt(5.0/3.0*r['I_E']/r['Mgas']).to('pc')
    r['R_rms_cl'] = np.sqrt(5.0/3.0*r['I_E_cl']/r['Mgas_cl']).to('pc')
    r['R_rms_cl_all'] = np.sqrt(5.0/3.0*r['I_E_cl_all']/r['Mgas_cl_all']).to('pc')

    # Flux of moment of inertia
    r['S_surf'] = 0.5*np.sum(vector_area*rhovrsq)
    # Surface integral of kinetic energy
    r['T_surf_kin'] = 0.5*np.sum(vector_area*rdotPi_kin)
    # Surface integral of thermal energy
    r['T_surf_thm'] = 0.5*np.sum(vector_area*rdotPi_thm)

    # Volume integral of thermal/kinetic energy
    r['T_thm'] = 1.5*((sph['cell_volume']*sph['pressure']).sum()).to('erg')
    r['T_kin'] = 0.5*((sph['cell_volume']*sph['density'] *
                       sph['velocity_magnitude']**2).sum()).to('erg')
    r['T_thm_cl'] = 1.5*((sph['cell_volume'][idx] *
                          sph['pressure'][idx]).sum()).to('erg')
    r['T_kin_cl'] = 0.5*((sph['cell_volume'][idx]*sph['density'][idx] *
                          sph['velocity_magnitude'][idx]**2).sum()).to('erg')
    r['T_thm_all'] = 1.5*((da['cell_volume']*da['pressure']).sum()).to('erg')
    r['T_kin_all'] = 0.5*((da['cell_volume']*da['density'] *
                           da['velocity_magnitude']**2).sum()).to('erg')
    r['T_thm_cl_all'] = 1.5*((da['cell_volume'][idx_neu_cl] *
                              da['pressure'][idx_neu_cl]).sum()).to('erg')
    r['T_kin_cl_all'] = 0.5*((da['cell_volume'][idx_neu_cl] *
                              da['density'][idx_neu_cl] *
                              da['velocity_magnitude'][idx_neu_cl]**2).sum()).to('erg')

    # Surface integral of Maxwell stress
    if mhd:
        # Surface integral of Maxwell tensor
        r['B_surf'] = np.sum(vector_area*rdotTM)
        # Equivalent method
        # r['M_surf_'] = surf.calculate_flux('rdotTM_x', 'rdotTM_y',
        #                                    'rdotTM_z', fluxing_field="ones")
        # Volume integral of magnetic energy density
        r['B'] = (sph['cell_volume']*sph['magnetic_energy']).sum().to('erg')
        r['B_cl'] = (sph['cell_volume'][idx] *
                     sph['magnetic_energy'][idx]).sum().to('erg')
        r['B_all'] = (da['cell_volume']*da['magnetic_energy']).sum().to('erg')
        r['B_cl_all'] = (da['cell_volume'][idx_neu_cl] *
                         da['magnetic_energy'][idx_neu_cl]).sum().to('erg')
        # Define average magnetic field on surface
        r['B_surf_avg_x'] = (surf['magnetic_field_x']*scalar_area).sum()/surf_area
        r['B_surf_avg_y'] = (surf['magnetic_field_y']*scalar_area).sum()/surf_area
        r['B_surf_avg_z'] = (surf['magnetic_field_z']*scalar_area).sum()/surf_area
        r['B_surf_avg_mag'] = np.sqrt(r['B_surf_avg_x']**2 +
                                      r['B_surf_avg_y']**2 +
                                      r['B_surf_avg_z']**2)
        # Magnetic energy as obtained by volume integral of B^2 - B_avg^2
        r['B_alt'] = (((sph['magnetic_field_magnitude']**2 -
                        r['B_surf_avg_mag']**2) *
                       sph['cell_volume']).sum()).to('erg')/(8.0*np.pi)
        r['B_cl_alt'] = (((sph['magnetic_field_magnitude'][idx]**2 -
                           r['B_surf_avg_mag']**2) *
                          sph['cell_volume'][idx]).sum()).to('erg')/(8.0*np.pi)
        r['B_cl_alt_all'] = (((da['magnetic_field_magnitude'][idx_neu_cl]**2 -
                               r['B_surf_avg_mag']**2) *
                              da['cell_volume'][idx_neu_cl]).sum()).to('erg')/(8.0*np.pi)
    else:
        r['B'] = 0.0
        r['B_surf'] = 0.0
        r['B_cl'] = 0.0
        r['B_cl_all'] = 0.0
        r['B_surf_avg_x'] = 0.0
        r['B_surf_avg_y'] = 0.0
        r['B_surf_avg_z'] = 0.0
        r['B_surf_avg_mag'] = 0.0
        r['B_alt'] = 0.0
        r['B_cl_alt'] = 0.0
        r['B_cl_alt_all'] = 0.0

    # Gravitational term (gravitational energy in the absence of an
    # external potential)
    r['W'] = (sph['density'] *
              (sph['x']*sph['gravitational_potential_gradient_x'] +
               sph['y']*sph['gravitational_potential_gradient_y'] +
               sph['z']*sph['gravitational_potential_gradient_z']) *
              sph['cell_volume']).sum().to('erg')
    r['W_cl'] = (sph['density'][idx] *
                 (sph['x'][idx]*sph['gravitational_potential_gradient_x'][idx] +
                  sph['y'][idx]*sph['gravitational_potential_gradient_y'][idx] +
                  sph['z'][idx]*sph['gravitational_potential_gradient_z'][idx]) *
                 sph['cell_volume'][idx]).sum().to('erg')
    r['W_cl_all'] = (da['density'][idx_neu_cl] *
                     (da['x'][idx_neu_cl]*da['gravitational_potential_gradient_x'][idx_neu_cl] +
                      da['y'][idx_neu_cl]*da['gravitational_potential_gradient_y'][idx_neu_cl] +
                      da['z'][idx_neu_cl]*da['gravitational_potential_gradient_z'][idx_neu_cl]) *
                     da['cell_volume'][idx_neu_cl]).sum().to('erg')

    return r
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap
import yt
from yt.units import dimensions
import trident as tri
from mpl_toolkits.axes_grid1 import AxesGrid
from massFracs import *
from yt.funcs import mylog
mylog.setLevel(40)  # This sets the log level to "ERROR"

kpc = 3.086e21  # cm

# define the runs and ions
run1 = {'Name': 'M1-v480-T1-chem', 'times': ['0041']}
run2 = {'Name': 'M6.2-v3000-T1-chem', 'times': ['0117']}
run3 = {'Name': 'M3.6-v3000-T3-chem', 'times': ['0143']}

runList = []
runList.append(run1)
runList.append(run2)
runList.append(run3)

ion1 = {'atom': 'H',
        'ion': 'I',
        'name': 'HI',
        'chem': 'fracHI_chem',
        'tri': 'fracHI_tri',
        'ionfolder': '/HI/',
        'rest_wave': 1215.67,
def _get_data(self, fields):
    """
    Get a list of fields to include in the trajectory collection.

    The trajectory collection itself is a dict of 2D numpy arrays,
    with shape (num_indices, num_steps)
    """
    missing_fields = [field for field in fields
                      if field not in self.field_data]
    if not missing_fields:
        return

    if self.suppress_logging:
        old_level = int(ytcfg.get("yt", "loglevel"))
        mylog.setLevel(40)
    ds_first = self.data_series[0]
    dd_first = ds_first.all_data()

    fds = {}
    new_particle_fields = []
    for field in missing_fields:
        fds[field] = dd_first._determine_fields(field)[0]
        if field not in self.particle_fields:
            if self.data_series[0]._get_field_info(*fds[field]).particle_type:
                self.particle_fields.append(field)
                new_particle_fields.append(field)

    grid_fields = [field for field in missing_fields
                   if field not in self.particle_fields]
    step = int(0)
    pbar = get_pbar(
        f"Generating [{', '.join(missing_fields)}] fields in trajectories",
        self.num_steps,
    )
    my_storage = {}

    for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
        mask = self.masks[i]
        sort = self.sorts[i]
        pfield = {}

        if new_particle_fields:  # there's at least one particle field
            dd = ds.all_data()
            for field in new_particle_fields:
                # This is easy... just get the particle fields
                pfield[field] = dd[fds[field]].d[mask][sort]

        if grid_fields:
            # This is hard... must loop over grids
            for field in grid_fields:
                pfield[field] = np.zeros(self.num_indices)
            x = self["particle_position_x"][:, step].d
            y = self["particle_position_y"][:, step].d
            z = self["particle_position_z"][:, step].d
            particle_grids, particle_grid_inds = ds.index._find_points(x, y, z)

            # This will fail for non-grid index objects
            for grid in particle_grids:
                cube = grid.retrieve_ghost_zones(1, grid_fields)
                for field in grid_fields:
                    CICSample_3(
                        x,
                        y,
                        z,
                        pfield[field],
                        self.num_indices,
                        cube[fds[field]],
                        np.array(grid.LeftEdge).astype(np.float64),
                        np.array(grid.ActiveDimensions).astype(np.int32),
                        grid.dds[0],
                    )
        sto.result_id = ds.parameter_filename
        sto.result = (self.array_indices[i], pfield)

        pbar.update(step)
        step += 1
    pbar.finish()

    output_field = np.empty((self.num_indices, self.num_steps))
    output_field.fill(np.nan)
    for field in missing_fields:
        fd = fds[field]
        for i, (_fn, (indices, pfield)) in \
                enumerate(sorted(my_storage.items())):
            output_field[indices, i] = pfield[field]
        self.field_data[field] = array_like_field(dd_first,
                                                  output_field.copy(), fd)

    if self.suppress_logging:
        mylog.setLevel(old_level)
#comm.Barrier()
#t_start = MPI.Wtime()
time = np.array([])
rad_dens = np.zeros([p_res, nfiles])
#center = [0.0, 0.0, 0.5]
for i in range(start, end):
    # complete the file name with the correct (zero-padded, four-digit) index
    file = fname + str(i).zfill(4)
    mylog.setLevel(40)
    ds = yt.load(file)
    print("Reading", file)
    # Create a sphere of radius r_max
    center = ds.domain_left_edge  # -(0.45, 0.45, 0.)
    sp = ds.sphere(center, r_max)
    profile = yt.create_profile(sp, 'radius', ['pressure'],
                                n_bins=p_res,
                                units={'radius': 'cm',
                                       "pressure": "dyn/cm**2"},
                                logs={'radius': False,
                                      "pressure": True})
    # Transform the profile from a dictionary to a numpy array
    profVal = list(profile.field_data.values())
    for k in profVal:
        d = k
    rad_dens[:, i] = d
    time = np.append(time, float(ds.current_time))
rad = np.array(profile.x)/r_tar
def __call__(self, parser, namespace, values, option_string=None):
    param, val = values.split("=")
    mylog.debug("Overriding config: %s = %s", param, val)
    ytcfg["yt", param] = val
    if param == "log_level":  # special case
        mylog.setLevel(int(val))
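
For context, a self-contained sketch of wiring a `key=value` override action like the one above into argparse; the action class name and the `--config` flag are assumptions, and the body is a simplified variant of the __call__ shown above:

import argparse
from yt.config import ytcfg

class SetConfigOption(argparse.Action):
    """Hypothetical action applying yt config overrides of the form
    --config key=value (simplified: no debug logging or log_level case)."""
    def __call__(self, parser, namespace, values, option_string=None):
        param, val = values.split("=")
        ytcfg["yt", param] = val

parser = argparse.ArgumentParser()
parser.add_argument("--config", action=SetConfigOption,
                    help="override a yt config option, e.g. --config log_level=40")
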
        norm_L1_error(ds_ref, ds, comparison_fields,
                      Nx=dim_length, Ny=dim_length, Nz=dim_length))

if __name__ == '__main__':
    args = parser.parse_args()

    verbose = args.v
    if not verbose:
        from yt.funcs import mylog
        mylog.setLevel(30)

    # load the target data to be analyzed
    target_file = get_block_list(args.target_path)
    ds = yt_load(target_file)

    specified_fields = args.fields

    if args.ref_type == 'sim':
        sim_comp = True
        unexpected_args = [("std", False), ("median", False),
                           ("permute", None), ("reverse", False),
                           ("offset_soln", 0.), ('bkg_velocity', [])]
        for name, default_val in unexpected_args:
            if hasattr(args, name) and getattr(args, name) != default_val: