def hydro_offset(self):
    if self._hydro_offset is not None:
        return self._hydro_offset
    # We now have to open the file and calculate it
    f = open(self.hydro_fn, "rb")
    fpu.skip(f, 6)
    # It goes: level, CPU, 8-variable
    min_level = self.ds.min_level
    n_levels = self.amr_header['nlevelmax'] - min_level
    hydro_offset = np.zeros(n_levels, dtype='int64')
    hydro_offset -= 1
    level_count = np.zeros(n_levels, dtype='int64')
    skipped = []
    for level in range(self.amr_header['nlevelmax']):
        for cpu in range(self.amr_header['nboundary'] +
                         self.amr_header['ncpu']):
            header = (('file_ilevel', 1, 'I'),
                      ('file_ncache', 1, 'I'))
            try:
                hvals = fpu.read_attrs(f, header, "=")
            except AssertionError:
                print("You are running with the wrong number of fields.")
                print("If you specified these in the load command, check the array length.")
                print("In this file there are %s hydro fields." % skipped)
                # print("The last set of field sizes was: %s" % skipped)
                raise
            if hvals['file_ncache'] == 0:
                continue
            assert hvals['file_ilevel'] == level + 1
            if cpu + 1 == self.domain_id and level >= min_level:
                hydro_offset[level - min_level] = f.tell()
                level_count[level - min_level] = hvals['file_ncache']
            skipped = fpu.skip(f, 8 * self.nvar)
    self._hydro_offset = hydro_offset
    self._level_count = level_count
    return self._hydro_offset

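# A minimal, self-contained sketch of the record walking that `fpu.skip` /
# `fpu.read_vector` perform above, assuming the usual Fortran unformatted
# layout: every record is a payload wrapped in leading and trailing 4-byte
# length markers. `skip_record` and `read_doubles` are hypothetical helper
# names, not yt functions.
import struct

import numpy as np


def skip_record(f):
    # Leading 4-byte record length, then the payload, then the
    # trailing 4-byte length marker.
    n = struct.unpack("=i", f.read(4))[0]
    f.seek(n, 1)
    f.read(4)
    return n


def read_doubles(f):
    # Read one record as a flat array of float64 values.
    n = struct.unpack("=i", f.read(4))[0]
    data = np.frombuffer(f.read(n), dtype="float64")
    f.read(4)
    return data
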
def fill(self, content, fields, selector):
    # Here we get a copy of the file, which we skip through and read the
    # bits we want.
    oct_handler = self.oct_handler
    all_fields = self.domain.ds.index.fluid_field_list
    fields = [f for ft, f in fields]
    tr = {}
    cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
    levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
        selector, self.domain_id, cell_count)
    for field in fields:
        tr[field] = np.zeros(cell_count, 'float64')
    for level, offset in enumerate(self.domain.hydro_offset):
        if offset == -1:
            continue
        content.seek(offset)
        nc = self.domain.level_count[level]
        temp = {}
        for field in all_fields:
            temp[field] = np.empty((nc, 8), dtype="float64")
        for i in range(8):
            for field in all_fields:
                if field not in fields:
                    fpu.skip(content)
                else:
                    temp[field][:, i] = fpu.read_vector(content, 'd')  # cell 1
        oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
    return tr

def fill(self, content, fields, selector, file_handler):
    # Here we get a copy of the file, which we skip through and read the
    # bits we want.
    oct_handler = self.oct_handler
    all_fields = [f for ft, f in file_handler.field_list]
    fields = [f for ft, f in fields]
    tr = {}
    cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
    levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
        selector, self.domain_id, cell_count)
    # Initializing data container
    for field in fields:
        tr[field] = np.zeros(cell_count, 'float64')
    # Loop over levels
    for level, offset in enumerate(file_handler.offset):
        if offset == -1:
            continue
        content.seek(offset)
        nc = file_handler.level_count[level]
        tmp = {}
        # Initialize temporary data container for io
        for field in all_fields:
            tmp[field] = np.empty((nc, 8), dtype="float64")
        for i in range(8):
            # Read the selected fields
            for field in all_fields:
                if field not in fields:
                    fpu.skip(content)
                else:
                    tmp[field][:, i] = fpu.read_vector(content, 'd')  # i-th cell
        oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, tmp)
    return tr

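# Hedged sketch of what `oct_handler.fill_level` does with the arrays
# produced above (yt's real implementation is in Cython); pure NumPy,
# assuming `levels`, `cell_inds` and `file_inds` come from
# `file_index_octs` and `tmp[field]` is the (ncache, 8) block read for
# this level.
def fill_level_sketch(level, levels, cell_inds, file_inds, tr, tmp, fields):
    mask = levels == level  # selected cells living on this level
    for field in fields:
        # For each selected cell, pick its oct row and cell column
        # out of the raw file block.
        tr[field][mask] = tmp[field][file_inds[mask], cell_inds[mask]]
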
def _read_hydro_header(self):
    # If no hydro file is found, return
    if not self._is_hydro():
        return
    if self.nvar > 0:
        return self.nvar
    # Read the number of hydro variables
    f = open(self.hydro_fn, "rb")
    fpu.skip(f, 1)
    self.nvar = fpu.read_vector(f, "i")[0]

def read_header(self):
    if not self.exists:
        self.field_offsets = {}
        self.field_types = {}
        self.local_particle_count = 0
        return
    f = open(self.fname, "rb")
    f.seek(0, os.SEEK_END)
    flen = f.tell()
    f.seek(0)
    hvals = {}
    attrs = self.attrs
    hvals.update(fpu.read_attrs(f, attrs))
    self.header = hvals
    self.local_particle_count = hvals['npart']

    if self.has_part_descriptor:
        particle_fields = (
            _read_part_file_descriptor(self.file_descriptor))
    else:
        particle_fields = list(self.known_fields)

        if self.ds._extra_particle_fields is not None:
            particle_fields += self.ds._extra_particle_fields

    field_offsets = {}
    _pfields = {}

    ptype = self.ptype

    # Read offsets
    for field, vtype in particle_fields:
        if f.tell() >= flen:
            break
        field_offsets[ptype, field] = f.tell()
        _pfields[ptype, field] = vtype
        fpu.skip(f, 1)

    iextra = 0
    while f.tell() < flen:
        iextra += 1
        field, vtype = ('particle_extra_field_%i' % iextra, 'd')
        particle_fields.append((field, vtype))

        field_offsets[ptype, field] = f.tell()
        _pfields[ptype, field] = vtype
        fpu.skip(f, 1)

    if iextra > 0 and not self.ds._warn_extra_fields:
        self.ds._warn_extra_fields = True
        w = ("Detected %s extra particle fields assuming kind "
             "`double`. Consider using the `extra_particle_fields` "
             "keyword argument if you have unexpected behavior.")
        mylog.warning(w % iextra)

    self.field_offsets = field_offsets
    self.field_types = _pfields

def read_star_field(file, field=None):
    data = {}
    with open(file, "rb") as fh:
        for dtype, variables in star_struct:
            found = ((isinstance(variables, tuple) and field in variables)
                     or field == variables)
            if found:
                data[field] = read_vector(fh, dtype[1], dtype[0])
            else:
                skip(fh, endian=">")
    return data.pop(field)

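# Example usage of `read_star_field` (file path and field name are
# illustrative; `star_struct` is the module-level record layout that the
# loop above walks):
#
#     tbirth = read_star_field("output/stars.dat", field="tbirth")
#
# Records whose variable names do not match `field` are skipped, so the
# file is traversed once per requested field.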
def offset(self):
    '''
    Compute the offsets of the fields.

    By default, it skips the header (as defined by `cls.attrs`)
    and computes the offset at each level.

    It should be generic enough for most of the cases, but if the
    *structure* of your fluid file is non-canonical, change this.
    '''
    if getattr(self, '_offset', None) is not None:
        return self._offset

    nvar = self.parameters['nvar']
    ndim = self.domain.ds.dimensionality
    twotondim = 2**ndim

    with open(self.fname, 'rb') as f:
        # Skip headers
        nskip = len(self.attrs)
        fpu.skip(f, nskip)

        # It goes: level, CPU, 8-variable (1 cube)
        min_level = self.domain.ds.min_level
        n_levels = self.domain.amr_header['nlevelmax'] - min_level
        offset = np.zeros(n_levels, dtype='int64')
        offset -= 1
        level_count = np.zeros(n_levels, dtype='int64')
        skipped = []
        amr_header = self.domain.amr_header
        for level in range(amr_header['nlevelmax']):
            for cpu in range(amr_header['nboundary'] +
                             amr_header['ncpu']):
                header = (('file_ilevel', 1, 'I'),
                          ('file_ncache', 1, 'I'))
                try:
                    hvals = fpu.read_attrs(f, header, "=")
                except AssertionError:
                    mylog.error(
                        "You are running with the wrong number of fields. "
                        "If you specified these in the load command, "
                        "check the array length. "
                        "In this file there are %s hydro fields." % skipped)
                    raise
                if hvals['file_ncache'] == 0:
                    continue
                assert hvals['file_ilevel'] == level + 1
                if cpu + 1 == self.domain_id and level >= min_level:
                    offset[level - min_level] = f.tell()
                    level_count[level - min_level] = hvals['file_ncache']
                skipped = fpu.skip(f, twotondim * nvar)
    self._offset = offset
    self._level_count = level_count
    return self._offset

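# The docstring above suggests overriding `offset` when the fluid file is
# non-canonical. A minimal sketch of such an override; the base class name
# `FieldFileHandler` is assumed from context, and `_scan_blocks` is a
# hypothetical helper you would write for your own block layout:
class MyFluidFileHandler(FieldFileHandler):
    def offset(self):
        if getattr(self, '_offset', None) is None:
            # Custom scan for a non-canonical block layout.
            self._offset, self._level_count = self._scan_blocks()
        return self._offset
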
def read_header(self):
    if not self.exists:
        self.field_offsets = {}
        self.field_types = {}
        self.local_particle_count = 0
        return
    f = open(self.fname, "rb")
    f.seek(0, os.SEEK_END)
    flen = f.tell()
    f.seek(0)
    hvals = {}
    # Read the header of the file
    attrs = self.attrs
    hvals.update(fpu.read_attrs(f, attrs))
    self._header = hvals

    # There is a subtlety here: we only want one domain to be read,
    # as ramses writes all the sinks in all the domains. So we set
    # local_particle_count to 0 except for the first domain to be read.
    if getattr(self.ds, '_sink_file_flag', False):
        self.local_particle_count = 0
    else:
        self.ds._sink_file_flag = True
        self.local_particle_count = hvals['nsink']

    # Read the fields + add the sink properties
    if self.has_part_descriptor:
        fields = (
            _read_part_file_descriptor(self.file_descriptor))
    else:
        fields = list(self.known_fields)

    for i in range(self.ds.dimensionality * 2 + 1):
        for j in range(self.ds.max_level, self.ds.min_level):
            fields.append(("particle_prop_%s_%s" % (i, j), "d"))

    field_offsets = {}
    _pfields = {}

    # Fill the fields, offsets and types
    self.fields = []
    for field, vtype in fields:
        self.fields.append(field)
        if f.tell() >= flen:
            break
        field_offsets[self.ptype, field] = f.tell()
        _pfields[self.ptype, field] = vtype
        fpu.skip(f, 1)
    self.field_offsets = field_offsets
    self.field_types = _pfields

def _read_amr_header(self):
    hvals = {}
    f = self.amr_file
    f.seek(0)

    for header in ramses_header(hvals):
        hvals.update(fpu.read_attrs(f, header))
    # For speedup, skip reading of 'headl' and 'taill'
    fpu.skip(f, 2)
    hvals['numbl'] = fpu.read_vector(f, 'i')

    # That's the header, now we skip a few.
    hvals['numbl'] = np.array(hvals['numbl']).reshape(
        (hvals['nlevelmax'], hvals['ncpu']))
    fpu.skip(f)
    if hvals['nboundary'] > 0:
        fpu.skip(f, 2)
        self.ngridbound = fpu.read_vector(f, 'i').astype("int64")
    else:
        self.ngridbound = np.zeros(hvals['nlevelmax'], dtype='int64')
    free_mem = fpu.read_attrs(f, (('free_mem', 5, 'i'), ))  # NOQA
    ordering = fpu.read_vector(f, 'c')  # NOQA
    fpu.skip(f, 4)
    # Now we're at the tree itself
    # Now we iterate over each level and each CPU.
    self.amr_header = hvals
    self.amr_offset = f.tell()
    self.local_oct_count = hvals['numbl'][
        self.ds.min_level:, self.domain_id - 1].sum()
    self.total_oct_count = hvals['numbl'][self.ds.min_level:, :].sum(axis=0)

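# Tiny worked example of the `numbl` bookkeeping above: `numbl` has one
# row per AMR level and one column per CPU, holding grid (oct) counts.
# The numbers below are invented for illustration.
import numpy as np

numbl = np.array([[1, 1],   # level 0: one root oct per CPU
                  [4, 2],   # level 1
                  [0, 8]])  # level 2
min_level = 0
domain_id = 1
local_oct_count = numbl[min_level:, domain_id - 1].sum()  # 5 octs on CPU 1
total_oct_count = numbl[min_level:, :].sum(axis=0)        # [5, 11] per CPU
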
def _read_particle_header(self):
    if not os.path.exists(self.part_fn):
        self.local_particle_count = 0
        self.particle_field_offsets = {}
        return
    f = open(self.part_fn, "rb")
    f.seek(0, os.SEEK_END)
    flen = f.tell()
    f.seek(0)
    hvals = {}
    attrs = (('ncpu', 1, 'I'),
             ('ndim', 1, 'I'),
             ('npart', 1, 'I'))
    hvals.update(fpu.read_attrs(f, attrs))
    fpu.read_vector(f, 'I')
    attrs = (('nstar_tot', 1, 'I'),
             ('mstar_tot', 1, 'd'),
             ('mstar_lost', 1, 'd'),
             ('nsink', 1, 'I'))
    hvals.update(fpu.read_attrs(f, attrs))
    self.particle_header = hvals
    self.local_particle_count = hvals['npart']
    particle_fields = [
        ("particle_position_x", "d"),
        ("particle_position_y", "d"),
        ("particle_position_z", "d"),
        ("particle_velocity_x", "d"),
        ("particle_velocity_y", "d"),
        ("particle_velocity_z", "d"),
        ("particle_mass", "d"),
        ("particle_identifier", "i"),
        ("particle_refinement_level", "I")]
    if hvals["nstar_tot"] > 0:
        particle_fields += [("particle_age", "d"),
                            ("particle_metallicity", "d")]
    if self.ds._extra_particle_fields is not None:
        particle_fields += self.ds._extra_particle_fields
    field_offsets = {}
    _pfields = {}
    for field, vtype in particle_fields:
        if f.tell() >= flen:
            break
        field_offsets["io", field] = f.tell()
        _pfields["io", field] = vtype
        fpu.skip(f, 1)
    self.particle_field_offsets = field_offsets
    self.particle_field_types = _pfields
    self.particle_types = self.particle_types_raw = ("io",)

def _read_particle_header(self):
    if not os.path.exists(self.part_fn):
        self.local_particle_count = 0
        self.particle_field_offsets = {}
        return
    f = open(self.part_fn, "rb")
    f.seek(0, os.SEEK_END)
    flen = f.tell()
    f.seek(0)
    hvals = {}
    attrs = (('ncpu', 1, 'I'),
             ('ndim', 1, 'I'),
             ('npart', 1, 'I'))
    hvals.update(fpu.read_attrs(f, attrs))
    fpu.read_vector(f, 'I')
    attrs = (('nstar_tot', 1, 'I'),
             ('mstar_tot', 1, 'd'),
             ('mstar_lost', 1, 'd'),
             ('nsink', 1, 'I'))
    hvals.update(fpu.read_attrs(f, attrs))
    self.particle_header = hvals
    self.local_particle_count = hvals['npart']
    particle_fields = [
        ("particle_position_x", "d"),
        ("particle_position_y", "d"),
        ("particle_position_z", "d"),
        ("particle_velocity_x", "d"),
        ("particle_velocity_y", "d"),
        ("particle_velocity_z", "d"),
        ("particle_mass", "d"),
        ("particle_identifier", "I"),
        ("particle_refinement_level", "I")]
    if hvals["nstar_tot"] > 0:
        particle_fields += [("particle_age", "d"),
                            ("particle_metallicity", "d")]
    field_offsets = {}
    _pfields = {}
    for field, vtype in particle_fields:
        if f.tell() >= flen:
            break
        field_offsets["io", field] = f.tell()
        _pfields["io", field] = vtype
        fpu.skip(f, 1)
    self.particle_field_offsets = field_offsets
    self.particle_field_types = _pfields
    self.particle_types = self.particle_types_raw = ("io",)

def _read_sink_header(self):
    if not self._has_sink:
        self.local_sink_count = 0
        self.sink_field_offsets = {}
        return
    f = open(self.sink_fn, "rb")
    f.seek(0, os.SEEK_END)
    flen = f.tell()
    f.seek(0)
    hvals = {}
    attrs = (('nsink', 1, 'I'),
             ('nindsink', 1, 'I'))
    hvals.update(fpu.read_attrs(f, attrs))
    self.sink_header = hvals
    self.local_sink_count = hvals['nsink']

    sink_fields = [
        ("particle_identifier", "i"),
        ("particle_mass", "d"),
        ("particle_position_x", "d"),
        ("particle_position_y", "d"),
        ("particle_position_z", "d"),
        ("particle_velocity_x", "d"),
        ("particle_velocity_y", "d"),
        ("particle_velocity_z", "d"),
        ("particle_age", "d"),
        ("BH_real_accretion", "d"),
        ("BH_bondi_accretion", "d"),
        ("BH_eddington_accretion", "d"),
        ("BH_esave", "d"),
        ("gas_spin_x", "d"),
        ("gas_spin_y", "d"),
        ("gas_spin_z", "d"),
        ("BH_spin_x", "d"),
        ("BH_spin_y", "d"),
        ("BH_spin_z", "d"),
        ("BH_spin", "d"),
        ("BH_efficiency", "d")]

    for i in range(self.ds.dimensionality * 2 + 1):
        for j in range(self.ds.max_level, self.ds.min_level):
            sink_fields.append(("particle_prop_%s_%s" % (i, j), "d"))

    field_offsets = {}
    _pfields = {}
    for field, vtype in sink_fields:
        if f.tell() >= flen:
            break
        field_offsets["sink", field] = f.tell()
        _pfields["sink", field] = vtype
        fpu.skip(f, 1)
    self.sink_field_offsets = field_offsets
    self.sink_field_types = _pfields

    self._add_ptype('sink')

def _read_amr_header(self):
    hvals = {}
    f = open(self.amr_fn, "rb")
    for header in ramses_header(hvals):
        hvals.update(fpu.read_attrs(f, header))
    # That's the header, now we skip a few.
    hvals['numbl'] = np.array(hvals['numbl']).reshape(
        (hvals['nlevelmax'], hvals['ncpu']))
    fpu.skip(f)
    if hvals['nboundary'] > 0:
        fpu.skip(f, 2)
        self.ngridbound = fpu.read_vector(f, 'i').astype("int64")
    else:
        self.ngridbound = np.zeros(hvals['nlevelmax'], dtype='int64')
    free_mem = fpu.read_attrs(f, (('free_mem', 5, 'i'), ))
    ordering = fpu.read_vector(f, 'c')
    fpu.skip(f, 4)
    # Now we're at the tree itself
    # Now we iterate over each level and each CPU.
    self.amr_header = hvals
    self.amr_offset = f.tell()
    self.local_oct_count = hvals['numbl'][
        self.ds.min_level:, self.domain_id - 1].sum()
    self.total_oct_count = hvals['numbl'][self.ds.min_level:, :].sum(axis=0)

def _parse_parameter_file(self):
    """
    Get the various simulation parameters & constants.
    """
    self.domain_left_edge = np.zeros(3, dtype="float")
    self.domain_right_edge = np.zeros(3, dtype="float") + 1.0
    self.dimensionality = 3
    self.refine_by = 2
    self.periodicity = (True, True, True)
    self.cosmological_simulation = True
    self.parameters = {}
    self.parameters.update(constants)
    self.parameters["Time"] = 1.0
    # read the amr header
    with open(self._file_amr, "rb") as f:
        amr_header_vals = fpu.read_attrs(f, amr_header_struct, ">")
        n_to_skip = len(("tl", "dtl", "tlold", "dtlold", "iSO"))
        fpu.skip(f, n_to_skip, endian=">")
        self.ncell = fpu.read_vector(f, "i", ">")[0]
        # Try to figure out the root grid dimensions
        est = int(np.rint(self.ncell**(1.0 / 3.0)))
        # Note here: this is the number of *cells* on the root grid.
        # This is not the same as the number of Octs.
        # domain dimensions is the number of root *cells*
        self.domain_dimensions = np.ones(3, dtype="int64") * est
        self.root_grid_mask_offset = f.tell()
        self.root_nocts = self.domain_dimensions.prod() // 8
        self.root_ncells = self.root_nocts * 8
        mylog.debug(
            "Estimating %i cells on a root grid side, %i root octs",
            est,
            self.root_nocts,
        )
        self.root_iOctCh = fpu.read_vector(f, "i", ">")[:self.root_ncells]
        self.root_iOctCh = self.root_iOctCh.reshape(
            self.domain_dimensions, order="F")
        self.root_grid_offset = f.tell()
        self.root_nhvar = fpu.skip(f, endian=">")
        self.root_nvar = fpu.skip(f, endian=">")
        # make sure that the number of root variables is a multiple of
        # rootcells
        assert self.root_nhvar % self.root_ncells == 0
        assert self.root_nvar % self.root_ncells == 0
        self.nhydro_variables = (
            self.root_nhvar + self.root_nvar) / self.root_ncells
        self.iOctFree, self.nOct = fpu.read_vector(f, "i", ">")
        self.child_grid_offset = f.tell()
        # lextra needs to be loaded as a string, but it's actually
        # array values. So pop it off here, and then re-insert.
        lextra = amr_header_vals.pop("lextra")
        amr_header_vals["lextra"] = np.fromstring(lextra, ">f4")
        self.parameters.update(amr_header_vals)
        amr_header_vals = None
        # estimate the root level
        float_center, fl, iocts, nocts, root_level = _read_art_level_info(
            f, [0, self.child_grid_offset], 1,
            coarse_grid=self.domain_dimensions[0])
        del float_center, fl, iocts, nocts
        self.root_level = root_level
        mylog.info("Using root level of %02i", self.root_level)
    # read the particle header
    self.particle_types = []
    self.particle_types_raw = ()
    if not self.skip_particles and self._file_particle_header:
        with open(self._file_particle_header, "rb") as fh:
            particle_header_vals = fpu.read_attrs(
                fh, particle_header_struct, ">")
            fh.seek(seek_extras)
            n = particle_header_vals["Nspecies"]
            wspecies = np.fromfile(fh, dtype=">f", count=10)
            lspecies = np.fromfile(fh, dtype=">i", count=10)
            # extras needs to be loaded as a string, but it's actually
            # array values. So pop it off here, and then re-insert.
            extras = particle_header_vals.pop("extras")
            particle_header_vals["extras"] = np.fromstring(extras, ">f4")
        self.parameters["wspecies"] = wspecies[:n]
        self.parameters["lspecies"] = lspecies[:n]
        for specie in range(n):
            self.particle_types.append("specie%i" % specie)
        self.particle_types_raw = tuple(self.particle_types)
        ls_nonzero = np.diff(lspecies)[:n - 1]
        ls_nonzero = np.append(lspecies[0], ls_nonzero)
        self.star_type = len(ls_nonzero)
        mylog.info("Discovered %i species of particles", len(ls_nonzero))
        info_str = "Particle populations: " + "%9i " * len(ls_nonzero)
        mylog.info(info_str, *ls_nonzero)
        self._particle_type_counts = dict(
            zip(self.particle_types_raw, ls_nonzero))
        for k, v in particle_header_vals.items():
            if k in self.parameters.keys():
                if not self.parameters[k] == v:
                    mylog.info(
                        "Inconsistent parameter %s %1.1e %1.1e",
                        k, v, self.parameters[k],
                    )
            else:
                self.parameters[k] = v
        self.parameters_particles = particle_header_vals
        self.parameters.update(particle_header_vals)
        self.parameters["ng"] = self.parameters["Ngridc"]
        self.parameters["ncell0"] = self.parameters["ng"]**3
    # setup standard simulation params yt expects to see
    self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
    self.omega_lambda = self.parameters["Oml0"]
    self.omega_matter = self.parameters["Om0"]
    self.hubble_constant = self.parameters["hubble"]
    self.min_level = self.parameters["min_level"]
    self.max_level = self.parameters["max_level"]
    if self.limit_level is not None:
        self.max_level = min(self.limit_level,
                             self.parameters["max_level"])
    if self.force_max_level is not None:
        self.max_level = self.force_max_level
    self.hubble_time = 1.0 / (self.hubble_constant * 100 / 3.08568025e19)
    self.current_time = self.quan(b2t(self.parameters["t"]), "Gyr")
    self.gamma = self.parameters["gamma"]
    mylog.info("Max level is %02i", self.max_level)

def _read_amr(self):
    """Open the oct file and read in octs level-by-level.

    For each oct, only the position, index, level and domain are needed;
    its position in the octree is found automatically. The key part is
    collecting all the information needed to feed oct_handler.add.
    """
    self.oct_handler = RAMSESOctreeContainer(self.ds.domain_dimensions / 2,
                                             self.ds.domain_left_edge,
                                             self.ds.domain_right_edge)
    root_nodes = self.amr_header['numbl'][self.ds.min_level, :].sum()
    self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
    fb = open(self.amr_fn, "rb")
    fb.seek(self.amr_offset)
    f = BytesIO()
    f.write(fb.read())
    f.seek(0)
    mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
                self.domain_id, self.total_oct_count.sum(),
                self.ngridbound.sum())

    def _ng(c, l):
        if c < self.amr_header['ncpu']:
            ng = self.amr_header['numbl'][l, c]
        else:
            ng = self.ngridbound[c - self.amr_header['ncpu'] +
                                 self.amr_header['nboundary'] * l]
        return ng

    min_level = self.ds.min_level
    # yt max level is not the same as the RAMSES one.
    # yt max level is the maximum number of additional refinement levels,
    # so for a unigrid run with no refinement it would be 0.
    # So we initially assume that.
    max_level = 0
    nx, ny, nz = (((i - 1.0) / 2.0) for i in self.amr_header['nx'])
    for level in range(self.amr_header['nlevelmax']):
        # Easier if we do this 1-indexed
        for cpu in range(self.amr_header['nboundary'] +
                         self.amr_header['ncpu']):
            # ng is the number of octs on this level on this domain
            ng = _ng(cpu, level)
            if ng == 0:
                continue
            ind = fpu.read_vector(f, "I").astype("int64")
            fpu.skip(f, 2)
            pos = np.empty((ng, 3), dtype='float64')
            pos[:, 0] = fpu.read_vector(f, "d") - nx
            pos[:, 1] = fpu.read_vector(f, "d") - ny
            pos[:, 2] = fpu.read_vector(f, "d") - nz
            # pos *= self.ds.domain_width
            # pos += self.dataset.domain_left_edge
            fpu.skip(f, 31)
            # parents = fpu.read_vector(f, "I")
            # fpu.skip(f, 6)
            # children = np.empty((ng, 8), dtype='int64')
            # for i in range(8):
            #     children[:, i] = fpu.read_vector(f, "I")
            # cpu_map = np.empty((ng, 8), dtype="int64")
            # for i in range(8):
            #     cpu_map[:, i] = fpu.read_vector(f, "I")
            # rmap = np.empty((ng, 8), dtype="int64")
            # for i in range(8):
            #     rmap[:, i] = fpu.read_vector(f, "I")
            # We don't want duplicate grids.
            # Note that we're adding *grids*, not individual cells.
            if level >= min_level:
                assert pos.shape[0] == ng
                n = self.oct_handler.add(cpu + 1, level - min_level, pos,
                                         count_boundary=1)
                self._error_check(cpu, level, pos, n, ng, (nx, ny, nz))
                if n > 0:
                    max_level = max(level - min_level, max_level)
    self.max_level = max_level
    self.oct_handler.finalize()

def _read_amr(self):
    """Open the oct file and read in octs level-by-level.

    For each oct, only the position, index, level and domain are needed;
    its position in the octree is found automatically. The key part is
    collecting all the information needed to feed oct_handler.add.
    """
    self.oct_handler = RAMSESOctreeContainer(self.ds.domain_dimensions / 2,
                                             self.ds.domain_left_edge,
                                             self.ds.domain_right_edge)
    root_nodes = self.amr_header['numbl'][self.ds.min_level, :].sum()
    self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
    mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
                self.domain_id, self.total_oct_count.sum(),
                self.ngridbound.sum())

    f = self.amr_file
    f.seek(self.amr_offset)

    def _ng(c, l):
        if c < self.amr_header['ncpu']:
            ng = self.amr_header['numbl'][l, c]
        else:
            ng = self.ngridbound[c - self.amr_header['ncpu'] +
                                 self.amr_header['nboundary'] * l]
        return ng

    min_level = self.ds.min_level
    # yt max level is not the same as the RAMSES one.
    # yt max level is the maximum number of additional refinement levels,
    # so for a unigrid run with no refinement it would be 0.
    # So we initially assume that.
    max_level = 0
    nx, ny, nz = (((i - 1.0) / 2.0) for i in self.amr_header['nx'])
    for level in range(self.amr_header['nlevelmax']):
        # Easier if we do this 1-indexed
        for cpu in range(self.amr_header['nboundary'] +
                         self.amr_header['ncpu']):
            # ng is the number of octs on this level on this domain
            ng = _ng(cpu, level)
            if ng == 0:
                continue
            ind = fpu.read_vector(f, "I").astype("int64")  # NOQA
            fpu.skip(f, 2)
            pos = np.empty((ng, 3), dtype='float64')
            pos[:, 0] = fpu.read_vector(f, "d") - nx
            pos[:, 1] = fpu.read_vector(f, "d") - ny
            pos[:, 2] = fpu.read_vector(f, "d") - nz
            # pos *= self.ds.domain_width
            # pos += self.dataset.domain_left_edge
            fpu.skip(f, 31)
            # parents = fpu.read_vector(f, "I")
            # fpu.skip(f, 6)
            # children = np.empty((ng, 8), dtype='int64')
            # for i in range(8):
            #     children[:, i] = fpu.read_vector(f, "I")
            # cpu_map = np.empty((ng, 8), dtype="int64")
            # for i in range(8):
            #     cpu_map[:, i] = fpu.read_vector(f, "I")
            # rmap = np.empty((ng, 8), dtype="int64")
            # for i in range(8):
            #     rmap[:, i] = fpu.read_vector(f, "I")
            # We don't want duplicate grids.
            # Note that we're adding *grids*, not individual cells.
            if level >= min_level:
                assert pos.shape[0] == ng
                n = self.oct_handler.add(cpu + 1, level - min_level, pos,
                                         count_boundary=1)
                self._error_check(cpu, level, pos, n, ng, (nx, ny, nz))
                if n > 0:
                    max_level = max(level - min_level, max_level)
    self.max_level = max_level
    self.oct_handler.finalize()

    # Close the AMR file
    f.close()

def _parse_parameter_file(self):
    """
    Get the various simulation parameters & constants.
    """
    self.dimensionality = 3
    self.refine_by = 2
    self.periodicity = (True, True, True)
    self.cosmological_simulation = True
    self.parameters = {}
    self.unique_identifier = \
        int(os.stat(self.parameter_filename)[stat.ST_CTIME])
    self.parameters.update(constants)
    self.parameters['Time'] = 1.0
    # read the amr header
    with open(self._file_amr, 'rb') as f:
        amr_header_vals = fpu.read_attrs(f, amr_header_struct, '>')
        for to_skip in ['tl', 'dtl', 'tlold', 'dtlold', 'iSO']:
            skipped = fpu.skip(f, endian='>')
        self.ncell = fpu.read_vector(f, 'i', '>')[0]
        # Try to figure out the root grid dimensions
        est = int(np.rint(self.ncell**(1.0 / 3.0)))
        # Note here: this is the number of *cells* on the root grid.
        # This is not the same as the number of Octs.
        # domain dimensions is the number of root *cells*
        self.domain_dimensions = np.ones(3, dtype='int64') * est
        self.root_grid_mask_offset = f.tell()
        self.root_nocts = self.domain_dimensions.prod() // 8
        self.root_ncells = self.root_nocts * 8
        mylog.debug("Estimating %i cells on a root grid side, "
                    "%i root octs", est, self.root_nocts)
        self.root_iOctCh = fpu.read_vector(f, 'i', '>')[:self.root_ncells]
        self.root_iOctCh = self.root_iOctCh.reshape(
            self.domain_dimensions, order='F')
        self.root_grid_offset = f.tell()
        self.root_nhvar = fpu.skip(f, endian='>')
        self.root_nvar = fpu.skip(f, endian='>')
        # make sure that the number of root variables is a multiple of
        # rootcells
        assert self.root_nhvar % self.root_ncells == 0
        assert self.root_nvar % self.root_ncells == 0
        self.nhydro_variables = ((self.root_nhvar + self.root_nvar) /
                                 self.root_ncells)
        self.iOctFree, self.nOct = fpu.read_vector(f, 'i', '>')
        self.child_grid_offset = f.tell()
        self.parameters.update(amr_header_vals)
        amr_header_vals = None
        # estimate the root level
        float_center, fl, iocts, nocts, root_level = _read_art_level_info(
            f, [0, self.child_grid_offset], 1,
            coarse_grid=self.domain_dimensions[0])
        del float_center, fl, iocts, nocts
        self.root_level = root_level
        mylog.info("Using root level of %02i", self.root_level)
    # read the particle header
    self.particle_types = []
    self.particle_types_raw = ()
    if not self.skip_particles and self._file_particle_header:
        with open(self._file_particle_header, "rb") as fh:
            particle_header_vals = fpu.read_attrs(
                fh, particle_header_struct, '>')
            fh.seek(seek_extras)
            n = particle_header_vals['Nspecies']
            wspecies = np.fromfile(fh, dtype='>f', count=10)
            lspecies = np.fromfile(fh, dtype='>i', count=10)
        self.parameters['wspecies'] = wspecies[:n]
        self.parameters['lspecies'] = lspecies[:n]
        for specie in range(n):
            self.particle_types.append("specie%i" % specie)
        self.particle_types_raw = tuple(self.particle_types)
        ls_nonzero = np.diff(lspecies)[:n - 1]
        ls_nonzero = np.append(lspecies[0], ls_nonzero)
        self.star_type = len(ls_nonzero)
        mylog.info("Discovered %i species of particles", len(ls_nonzero))
        mylog.info("Particle populations: " + '%9i ' * len(ls_nonzero),
                   *ls_nonzero)
        for k, v in particle_header_vals.items():
            if k in self.parameters.keys():
                if not self.parameters[k] == v:
                    mylog.info(
                        "Inconsistent parameter %s %1.1e %1.1e",
                        k, v, self.parameters[k])
            else:
                self.parameters[k] = v
        self.parameters_particles = particle_header_vals
        self.parameters.update(particle_header_vals)
        self.parameters['ng'] = self.parameters['Ngridc']
        self.parameters['ncell0'] = self.parameters['ng']**3
    # setup standard simulation params yt expects to see
    self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
    self.omega_lambda = self.parameters['Oml0']
    self.omega_matter = self.parameters['Om0']
    self.hubble_constant = self.parameters['hubble']
    self.min_level = self.parameters['min_level']
    self.max_level = self.parameters['max_level']
    if self.limit_level is not None:
        self.max_level = min(
            self.limit_level, self.parameters['max_level'])
    if self.force_max_level is not None:
        self.max_level = self.force_max_level
    self.hubble_time = 1.0 / (self.hubble_constant * 100 / 3.08568025e19)
    self.current_time = self.quan(b2t(self.parameters['t']), 'Gyr')
    self.gamma = self.parameters["gamma"]
    mylog.info("Max level is %02i", self.max_level)

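# Worked example for the `hubble_time` expression above: the stored
# `hubble` parameter is little-h, so H0 = hubble * 100 km/s/Mpc, and with
# 1 Mpc = 3.08568025e19 km the inverse Hubble constant comes out in
# seconds (the value of `hubble` here is illustrative).
hubble = 0.70
H0_per_s = hubble * 100 / 3.08568025e19   # km/s/Mpc -> 1/s
hubble_time_s = 1.0 / H0_per_s            # ~4.4e17 s, i.e. ~14 Gyr
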
def _read_particle_header(self):
    if not os.path.exists(self.part_fn):
        self.local_particle_count = 0
        self.particle_field_offsets = {}
        return
    f = open(self.part_fn, "rb")
    f.seek(0, os.SEEK_END)
    flen = f.tell()
    f.seek(0)
    hvals = {}
    attrs = (('ncpu', 1, 'I'),
             ('ndim', 1, 'I'),
             ('npart', 1, 'I'))
    hvals.update(fpu.read_attrs(f, attrs))
    fpu.read_vector(f, 'I')
    attrs = (('nstar_tot', 1, 'I'),
             ('mstar_tot', 1, 'd'),
             ('mstar_lost', 1, 'd'),
             ('nsink', 1, 'I'))
    hvals.update(fpu.read_attrs(f, attrs))
    self.particle_header = hvals
    self.local_particle_count = hvals['npart']

    # Try reading particle file descriptor
    if self._has_part_descriptor:
        particle_fields = (
            _read_part_file_descriptor(self._part_file_descriptor))
    else:
        particle_fields = [
            ("particle_position_x", "d"),
            ("particle_position_y", "d"),
            ("particle_position_z", "d"),
            ("particle_velocity_x", "d"),
            ("particle_velocity_y", "d"),
            ("particle_velocity_z", "d"),
            ("particle_mass", "d"),
            ("particle_identifier", "i"),
            ("particle_refinement_level", "I")]

        if self.ds._extra_particle_fields is not None:
            particle_fields += self.ds._extra_particle_fields

    ptype = 'io'
    field_offsets = {}
    _pfields = {}

    # Read offsets
    for field, vtype in particle_fields:
        if f.tell() >= flen:
            break
        field_offsets[ptype, field] = f.tell()
        _pfields[ptype, field] = vtype
        fpu.skip(f, 1)

    iextra = 0
    while f.tell() < flen:
        iextra += 1
        field, vtype = ('particle_extra_field_%i' % iextra, 'd')
        particle_fields.append((field, vtype))

        field_offsets[ptype, field] = f.tell()
        _pfields[ptype, field] = vtype
        fpu.skip(f, 1)

    if iextra > 0 and not self.ds._warn_extra_fields:
        self.ds._warn_extra_fields = True
        w = ("Detected %s extra particle fields assuming kind "
             "`double`. Consider using the `extra_particle_fields` "
             "keyword argument if you have unexpected behavior.")
        mylog.warning(w % iextra)

    self.particle_field_offsets = field_offsets
    self.particle_field_types = _pfields

    # Register the particle type
    self._add_ptype(ptype)

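# Usage note for the `extra_particle_fields` warning above: the extra
# fields can be declared at load time so they get proper names and types
# (the output path and field names below are illustrative):
#
#     import yt
#     ds = yt.load("output_00080/info_00080.txt",
#                  extra_particle_fields=[("particle_birth_time", "d"),
#                                         ("particle_metallicity", "d")])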