def __init__(self, sdf_dir):
    # Sanity-check the directory path
    self.sdf_dir = sdf_dir
    if not os.path.exists(self.sdf_dir):
        raise ValueError("sdf_dir path does not exist: %s"
                         % (self.sdf_dir))

    # Make a sorted list of all SDF file names in the directory
    # These are the files with extensions made up entirely of digits
    self.sdf_file_names = list()
    for file_name in glob.glob(os.path.join(self.sdf_dir, "*.*")):
        dot_ext = os.path.splitext(file_name)[1]
        if dot_ext == "":
            continue
        if dot_ext[0] == '.' and dot_ext[1:].isdigit():
            self.sdf_file_names.append(file_name)
    self.sdf_file_names.sort()
    self.n_sdfs = len(self.sdf_file_names)

    # Assume that the other necessary files have standard names
    self.abun_file_name = os.path.join(self.sdf_dir, STD_ABUN_FILE_NAME)
    self.ztpi_file_name = os.path.join(self.sdf_dir, STD_ZTPI_FILE_NAME)

    # Open all the SDF files simultaneously and keep them in a list
    # I might need to re-think this strategy if it requires too much RAM
    self.sdf_files = [sdfpy.SDFRead(sdf_file_name)
                      for sdf_file_name in self.sdf_file_names]
    self.sdf_files.sort(key=lambda sdf: sdf.parameters["tpos"])

    # Construct an array containing all unique particle ids from the SDFs
    self.particle_ids = reduce(np.union1d,
                               [sdf["ident"] for sdf in self.sdf_files])
    self.n_particles = self.particle_ids.size

    # Sanity-check some of the contents of the SDFs
    # First, make sure there are no repeated particle ids
    # Then, ensure self-consistency with the number of particles
    for sdf in self.sdf_files:
        assert test_if_unique(sdf["ident"])
        assert sdf.parameters["npart"] == sdf["ident"].size
        assert sdf.parameters["npart"] <= self.n_particles

    # To hopefully decrease search times, check if each SDF is sorted
    self.sdf_is_sorted = np.array([test_if_sorted(sdf["ident"])
                                   for sdf in self.sdf_files], "bool")

    # Read the other necessary files using the classes for them
    self.abun = Abundances(self.abun_file_name)
    self.ztpi = ZoneToPartId(self.ztpi_file_name)

    # Run every remaining sanity check I can think of
    # The first test ensures that the files agree on the number of zones
    # The second test ensures that they agree on the set of particle ids
    assert np.all(self.ztpi.get_zones() <= self.abun.get_n_zones() - 1)
    assert np.all(np.in1d(self.particle_ids, self.ztpi.particle_ids,
                          assume_unique=True))
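# The asserts above lean on two helpers, test_if_unique and test_if_sorted,
# that are not shown in this excerpt. A minimal sketch of what they might
# look like, assuming the "ident" columns are 1-D numpy integer arrays:
import numpy as np

def test_if_unique(arr):
    # True when no value appears more than once
    return np.unique(arr).size == arr.size

def test_if_sorted(arr):
    # True when the array is in non-decreasing order; sorted idents let
    # later particle lookups fall back on binary search
    return bool(np.all(arr[:-1] <= arr[1:]))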
def get_tpos(filename):
    # Open the SDF just long enough to read its tpos parameter, then
    # delete the reader so the file handle is released
    sdf = sp.SDFRead(filename)
    tpos = sdf.parameters["tpos"]
    del sdf
    return tpos
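# A sketch of the driver loop that presumably feeds the sort below; the
# name jet3b_sdf_names is hypothetical, standing in for the result of a
# glob over the jet3b dump files that is not part of this excerpt:
sdfs_with_tpos = []
for fullname in jet3b_sdf_names:
    sdfs_with_tpos.append((get_tpos(fullname), fullname))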
sdfs_with_tpos.sort(key=lambda x: x[0])

# Which jet3b SDF has the nearest tpos value to the final 50Am tpos value?
bestdiff, bestfile = None, None
for jet3b_tpos, jet3b_sdf in sdfs_with_tpos:
    diff = abs(jet3b_tpos - final_50Am_tpos)
    if bestdiff is None or diff < bestdiff:
        bestdiff = diff
        bestfile = jet3b_sdf
best_jet3b_sdf = bestfile
print "nearest jet3b sdf:", best_jet3b_sdf

best_jet3b_tpos = get_tpos(best_jet3b_sdf)
# tpos is in code time units of 100 s, so dividing by 36 gives hours
print "nearest jet3b tpos value:", best_jet3b_tpos / 36., "hrs"

# Identify all desired data columns in the jet3b SDF
# (NB: "iter" and "id" shadow Python builtins; harmless in a short script)
sdf = sp.SDFRead(best_jet3b_sdf)
iter = sdf.parameters["iter"]
id = sdf["ident"]
x, y, z = sdf["x"], sdf["y"], sdf["z"]
mass = sdf["mass"]
h = sdf["h"]
rho = sdf["rho"]

# Convert all values from code units to CGS
x2 = x * SNSPH_LENGTH
y2 = y * SNSPH_LENGTH
z2 = z * SNSPH_LENGTH
mass2 = mass * SNSPH_MASS
h2 = h * SNSPH_LENGTH
rho2 = rho * SNSPH_DENSITY
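# For reference, the linear nearest-tpos scan above collapses to a single
# min() call; this is an equivalent rewrite, not the original code:
best_jet3b_tpos, best_jet3b_sdf = min(
    sdfs_with_tpos, key=lambda tf: abs(tf[0] - final_50Am_tpos))

# The CGS conversion constants are assumed to be defined elsewhere; the
# density unit, at least, follows dimensionally from the other two:
#     SNSPH_DENSITY = SNSPH_MASS / SNSPH_LENGTH**3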
tpos = get_tpos(fullname)
sdfs_with_tpos.append((tpos, fullname))
sdfs_with_tpos.sort(key=lambda x: x[0])
n_sdfs = len(sdfs_with_tpos)

# Set up arrays to store the temperature history data etc.
iter_arr = np.empty(n_sdfs, "int64")
tpos_arr = np.empty(n_sdfs, "float64")
temp_arr = np.empty((n_pids, n_sdfs), "float64")
dens_arr = np.empty((n_pids, n_sdfs), "float64")

# Read all of the desired data from the sim's SDFs
for j, sdfname in enumerate([x[1] for x in sdfs_with_tpos]):
    # Open the SDF and read the time information
    sdf = sp.SDFRead(sdfname)
    iter_arr[j] = sdf.parameters["iter"]
    tpos_arr[j] = sdf.parameters["tpos"]

    # Read the data for the selected particles
    # (assumes selected_pids is sorted and its pids appear in each SDF
    # in the same relative order, which the assert verifies)
    i = 0
    for sdf_i, sdf_pid in enumerate(sdf["ident"]):
        if sdf_pid in selected_pids:
            assert sdf_pid == selected_pids[i]
            temp_arr[i, j] = sdf["temp"][sdf_i]
            dens_arr[i, j] = sdf["rho"][sdf_i]
            i += 1

    # Make sure the SDF file is closed
    del sdf
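# The pid-by-pid membership test above costs O(npart * n_pids) per SDF.
# If selected_pids is a sorted numpy array, the inner loop could be replaced
# by a vectorized sketch like this one, which additionally assumes every
# selected pid is present in every SDF:
ident = np.asarray(sdf["ident"])
mask = np.in1d(ident, selected_pids)      # rows belonging to selected pids
order = np.argsort(ident[mask])           # permutation onto sorted pid order
temp_arr[:, j] = np.asarray(sdf["temp"])[mask][order]
dens_arr[:, j] = np.asarray(sdf["rho"])[mask][order]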
glob.glob(SNDATA + "jet3b/jet3b/run3g_50Am_jet3b_sph.????"),
    # Each sim's file list appears once per value being extracted,
    # so the jet3b and cco2 globs are intentionally repeated
    glob.glob(SNDATA + "jet3b/jet3b/run3g_50Am_jet3b_sph.????"),
    glob.glob(SNDATA + "cco2/cco2/r3g_1M_cco2_sph.*00"),
    glob.glob(SNDATA + "cco2/cco2/r3g_1M_cco2_sph.*00")
]
assert len(sims) == n
assert len(values) == n
assert len(sdfs) == n
print "ready to go!\n"

for i in xrange(n):
    # Open files, sort by tpos
    readers = [sdfpy.SDFRead(s) for s in sdfs[i]]
    readers.sort(key=lambda read: read.parameters['tpos'])
    print sims[i] + " sdfs opened"

    # Set up arrays to hold data
    tpos = np.empty(len(readers), "float32")
    pid = np.array(readers[-1]['ident'], "uint32")
    data = np.empty((pid.size, tpos.size), "float32")
    print sims[i] + " " + values[i] + " arrays ready"

    # Read data into arrays carefully
    # Ignore any PIDs that aren't in the final SDF file
    for j, read in enumerate(readers):
        tpos[j] = read.parameters['tpos']
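        # The excerpt ends mid-loop; presumably the body goes on to fill
        # data[:, j]. A sketch of one way to do that, assuming pid has been
        # sorted beforehand and that values[i] names the SDF field to read:
        ident = np.asarray(read['ident'], "uint32")
        keep = np.in1d(ident, pid)                # drop pids absent from final SDF
        rows = np.searchsorted(pid, ident[keep])  # destination row per kept pid
        data[rows, j] = np.asarray(read[values[i]], "float32")[keep]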