def main(): printf(" \n",e=True) printf(" (Open)gadget2+3 to HDF5 format \n",e=True) printf(" by Antonio Ragagnin, 2018\n",e=True) printf(" \n",e=True) printf(" usage: python gadget_to_hdf5.py -i infile -o outfile -p ptype -b block -n hdf5-name\n",e=True) printf(" example: python gadget_to_hdf5.py -i infile -o file.hdf5 -p 0 -b TEMP -n Temperature\n",e=True) printf(" \n",e=True) parser = argparse.ArgumentParser(description='') parser.add_argument('-i', type=str, required=True) parser.add_argument('-o', type=str, required=True) parser.add_argument('-p', type=int, required=True) parser.add_argument('-b', type=str, required=True) parser.add_argument('-n', type=str, required=True) args = parser.parse_args() ptype=args.p f = g3read.GadgetFile(args.i) with h5py.File(args.o, "w") as g: poses = f.read("POS ",ptype) g.create_dataset("X", data = poses[:,0]) g.create_dataset("Y", data = poses[:,1]) g.create_dataset("Z", data = poses[:,2]) s=f.read(args.b,ptype) print("%s min=%f max=%f\n"%(args.b,np.min(s), np.max(s))) g.create_dataset(args.n, data = s)
def __init__(self, pintlessfile=params.pintlessfile, snapnum=-1, ready_to_bcast=False):
    # nostdout() silences g3read's console output while the blocks are loaded
    with nostdout():
        if snapnum == -1:
            fname = pintlessfile
        else:
            fname = pintlessfile + "{}".format(snapnum)
        self.ID = g3read.read_new(fname, 'ID  ', 1)
        self.V1 = g3read.read_new(fname, 'VZEL', 1)
        self.V2 = g3read.read_new(fname, 'V2  ', 1)
        self.V31 = g3read.read_new(fname, 'V3_1', 1)
        self.V32 = g3read.read_new(fname, 'V3_2', 1)
        self.Zacc = g3read.read_new(fname, 'ZACC', 1)

    self.Npart = self.ID.size
    self.NG = params.ngrid
    self.Lbox = g3read.GadgetFile(fname, is_snap=False).header.BoxSize
    self.Cell = self.Lbox / float(self.NG)
    face = 1
    sgn = [1, 1, 1]

    # Recentering the box: recover the Lagrangian (grid) position of each
    # particle from its ID and shift it to the cell center
    self.qPos = np.array([(self.ID - 1) % self.NG,
                          ((self.ID - 1) // self.NG) % self.NG,
                          ((self.ID - 1) // self.NG ** 2) % self.NG]).transpose() * self.Cell + self.Cell / 2.
    self.qPos = randomizePositions(params.plccenter, face, sgn, self.qPos / self.Lbox)
    self.V1 = self.Cell * randomizeVelocities(face, sgn, self.V1) / self.Lbox
    self.V2 = self.Cell * randomizeVelocities(face, sgn, self.V2) / self.Lbox
    self.V31 = self.Cell * randomizeVelocities(face, sgn, self.V31) / self.Lbox
    self.V32 = self.Cell * randomizeVelocities(face, sgn, self.V32) / self.Lbox

    # Changing the basis to the PLC (past-light-cone) basis
    self.qPos = self.qPos.dot(params.change_of_basis)
    self.V1 = self.V1.dot(params.change_of_basis)
    self.V2 = self.V2.dot(params.change_of_basis)
    self.V31 = self.V31.dot(params.change_of_basis)
    self.V32 = self.V32.dot(params.change_of_basis)

    if ready_to_bcast:
        # Reshaping to be MPI.Broadcast friendly (contiguous float32 buffers)
        self.qPos = self.qPos.astype(np.float32).reshape((3, self.Npart))
        self.V1 = self.V1.astype(np.float32).reshape((3, self.Npart))
        self.V2 = self.V2.astype(np.float32).reshape((3, self.Npart))
        self.V31 = self.V31.astype(np.float32).reshape((3, self.Npart))
        self.V32 = self.V32.astype(np.float32).reshape((3, self.Npart))
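# Usage sketch for the ready_to_bcast layout above. Assumptions (hypothetical
# names, not from the original code): the enclosing class is called
# TimelessSnapshot and ranks communicate through mpi4py.
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
if comm.rank == 0:
    snap = TimelessSnapshot(ready_to_bcast=True)
    npart = np.array(snap.Npart, dtype=np.int64)
else:
    npart = np.array(0, dtype=np.int64)
comm.Bcast(npart, root=0)  # every rank needs Npart to allocate its buffer

# contiguous float32 buffers match the (3, Npart) reshape done in __init__
qPos = snap.qPos if comm.rank == 0 else np.empty((3, int(npart)), dtype=np.float32)
comm.Bcast(qPos, root=0)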
def main(infile, outfile):
    f = g3read.GadgetFile(infile)
    with h5py.File(outfile, "w") as g:
        for ptype in [0, 1, 2, 3, 4, 5]:
            if f.header.npart[ptype] == 0:
                continue
            printf("ptype=%d\n" % (ptype))
            for block in f.blocks:
                # skip blocks that do not exist for this particle type
                if not f.blocks[block].ptypes[ptype]:
                    continue
                lowerblock = block.lower().strip()
                if lowerblock in HDF5_MAP:
                    subgroup_name = "PartType%d/%s" % (ptype, HDF5_MAP[lowerblock])
                else:
                    raise Exception("I don't know the HDF5 name of block '%s'" % (block))
                printf("    block='%s' -> %s\n" % (block, subgroup_name))
                dset = g.create_dataset(subgroup_name, data=f.read(block, ptype))
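# The converter above relies on a module-level HDF5_MAP that translates
# lower-cased Gadget block names into Gadget/Arepo-style HDF5 dataset names.
# A minimal sketch of such a table (assumption: the table actually used by the
# script may contain more entries; these are the conventional names):
HDF5_MAP = {
    "pos":  "Coordinates",
    "vel":  "Velocities",
    "id":   "ParticleIDs",
    "mass": "Masses",
    "u":    "InternalEnergy",
    "rho":  "Density",
    "temp": "Temperature",
}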
def get_fof_file(filename, use_cache=False):
    # use_cache is accepted for API compatibility but not honored here
    return g3.GadgetFile(filename, is_snap=False)
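# If use_cache should actually avoid re-opening the same catalogue file, a
# minimal sketch with functools could look like this (assumption: keeping the
# GadgetFile objects alive for the whole run is acceptable memory-wise):
import functools

@functools.lru_cache(maxsize=None)
def _get_fof_file_cached(filename):
    # cached: each catalogue file is opened at most once per process
    return g3.GadgetFile(filename, is_snap=False)

def get_fof_file(filename, use_cache=False):
    if use_cache:
        return _get_fof_file_cached(filename)
    return g3.GadgetFile(filename, is_snap=False)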
""" TEST READ MAGNETICUM SIMS """ snapbase = '/HydroSims/Magneticum/Box1a/mr_bao/snapdir_144/snap_144' groupbase = '/HydroSims/Magneticum/Box1a/mr_bao/groups_144/sub_144' from_icluster = 0 #id of first cluster to analyse to_icluster = 11494 #id of last cluster to analyse h = .704 """ BEGIN """ printf("#cluster_id mcri[Msun] rcri[kpc] c200c_dm c200c_all\n") nfiles = 100 icluster = -1 for ifile in range(nfiles): s = g.GadgetFile(groupbase + '.' + str(ifile), is_snap=False) nclusters_in_file = s.header.npart[0] masses = s.read_new("MCRI", 0) positions = s.read_new("RCRI", 0) for icluster_file in range(nclusters_in_file): icluster = icluster + 1 if icluster < from_icluster: continue cluster_data = pp.PostProcessing( cluster_id=icluster, cluster_id_in_file=icluster_file, cluster_i_file=ifile, group_base=groupbase, snap_base=snapbase, n_files=nfiles, subfind_and_fof_same_file=False, )
def main():
    import numpy as np
    import json

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--basename', type=str, help='base file name of groups', required=True)
    parser.add_argument('--simulation-name', type=str, help='name of simulation', required=True)
    parser.add_argument('--tag', type=str, help='tag for snapshot', required=True)
    parser.add_argument('--snap', type=str, help='snap___', required=True)
    parser.add_argument('--min-field', type=str, default="GLEN")
    parser.add_argument('--min-val', type=float, default=0.)
    parser.add_argument('--add-fof', type=str2bool, default=True)
    parser.add_argument('--add-sf-bounds', type=str2bool, default=False)
    parser.add_argument('--add-sf-data', type=str2bool, default=False)
    parser.add_argument('--look', type=str, default="MCRI")
    parser.add_argument('--only-subfind-existing-columns', action='store_true', default=False)
    parser.add_argument('--only-fof-existing-columns', action='store_true', default=False)
    parser.add_argument('--format', type=str, default="%e")
    parser.add_argument('--delete-groups', help="delete all FoFs of the snap before inserting; if False, tries to edit them",
                        type=str2bool, default=False)
    args = parser.parse_args()
    for k in args.__dict__:
        print(k, args.__dict__[k])

    args.first_file = 0
    basegroup = args.basename + 'groups_%s/sub_%s.' % (args.snap, args.snap)
    first_filename = basegroup + '0'
    first_file = g.GadgetFile(first_filename, is_snap=False)
    header = first_file.header
    nfiles = header.num_files
    redshift = header.redshift
    a = 1. / (1. + redshift)

    # get FoF and SubFind blocks: ptypes[0] flags FoF blocks, ptypes[1] SubFind blocks
    FOF_blocks = []
    SF_blocks = []
    all_blocks = first_file.blocks
    for block_name in all_blocks:
        ptypes = all_blocks[block_name].ptypes
        if ptypes[0]:
            FOF_blocks.append(block_name)
        if ptypes[1]:
            SF_blocks.append(block_name)

    simulation = Simulation.get_or_none(name=args.simulation_name)
    if simulation is None:
        simulation = Simulation(name=args.simulation_name, box_size=header.BoxSize,
                                path=args.basename, h=header.HubbleParam)
        simulation.save()

    snap = simulation.snaps.where(Snap.name == args.snap).first()
    new_snap = False
    if snap is None:
        snap = Snap(simulation=simulation, name=args.snap, redshift=redshift, a=a, tag=args.tag)
        new_snap = True
        snap.save()
    elif args.add_fof and not args.delete_groups:
        test_fof = FoF.select().where((FoF.snap == snap) & (FoF.resolvness == 1)).first()
        if test_fof is not None:
            raise Exception("Clusters for this simulation and snapshot are already present in the database :(")

    if args.delete_groups:
        fofs = snap.fofs
        Galaxy.delete().where(Galaxy.snap == snap).execute()
        FoF.delete().where(FoF.snap == snap).execute()
        FoFFile.delete().where(FoFFile.snap == snap).execute()

    max_fof_id = None
    if args.add_sf_bounds or args.add_sf_data:
        # the resolved FoF with the smallest GLEN bounds the cluster IDs
        # for which SubFind data must be attached
        cluster = FoF.select().where((FoF.snap == snap) & (FoF.resolvness == 1)).order_by(FoF.glen.asc()).first()
        if cluster is not None:
            max_fof_id = cluster.id_cluster
            args.min_val = 0
            printf("Max FoF ID: %d\n" % (max_fof_id))

    subfind_cols = None
    if args.only_subfind_existing_columns:
        subfind_cols = [f for f in Galaxy.__dict__ if f[0] != '_' and f != 'DoesNotExist']
        printf("Subfind columns: %s\n" % (' '.join(subfind_cols)), e=True)
    fof_cols = None
    if args.only_fof_existing_columns:
        fof_cols = [f for f in FoF.__dict__ if f[0] != '_' and f != 'DoesNotExist']
        printf("FoF columns: %s\n" % (' '.join(fof_cols)), e=True)

    ifof = 0
    previous_info = None
    for ifile in range(args.first_file, nfiles):
        filename = basegroup + str(ifile)
        f = g.GadgetFile(filename, is_snap=False)
        foffile = FoFFile.select().where((FoFFile.snap == snap) & (FoFFile.ifile == ifile)).first()
        if foffile is None:
            FoFFile.insert(snap=snap, id_first_cluster=ifof, ifile=ifile).execute()

        # some catalogue files lack the INFO block: reuse the one from the previous file
        if f.info is None:
            f.info = previous_info
        else:
            previous_info = f.info

        val = f.read_new(args.min_field, 0)
        n_fof_groups = len(val)
        printf("Filename: %s, from id_cluster=%d, min_val=%f, min(%s)=%f, max(%s)=%f\n" %
               (filename, ifof, args.min_val, args.min_field, np.min(val), args.min_field, np.max(val)), e=True)
        if args.add_fof and np.max(val) < args.min_val:
            printf("Reached min val\n")
            break

        if args.add_fof:
            # insert FoFs
            props = {}
            for FOF_block in FOF_blocks:
                props[FOF_block.lower().replace(" ", "")] = f.read_new(FOF_block, 0)
            props["id_cluster"] = np.arange(n_fof_groups) + ifof
            props["i_file"] = np.zeros(n_fof_groups) + ifile
            props["i_in_file"] = np.arange(n_fof_groups)
            props["snap_id"] = np.zeros(n_fof_groups) + snap.id
            props["start_subfind_file"] = np.zeros(n_fof_groups) - 1
            props["end_subfind_file"] = np.zeros(n_fof_groups) - 1
            props["resolvness"] = np.zeros(n_fof_groups) + 1
            lprops = flat_props(props, n_fof_groups, fof_cols)
            printf("Clusters: len=%d N=%d from %d to %d\n" % (len(lprops), n_fof_groups, ifof, ifof + n_fof_groups))
            ifof += n_fof_groups

            param = args.look.lower().replace(" ", "")
            s = "Block: %%s, min=%s, max=%s\n" % (args.format, args.format)
            printf(s % (param, np.min(props[param]), np.max(props[param])))

            n_inserts = 0
            with db.atomic():
                # insert in small chunks to stay below the DB's bound-variable limit
                n_insert_per_chunk = 15
                for idx in range(0, len(lprops) + n_insert_per_chunk, n_insert_per_chunk):
                    chunk = lprops[idx:idx + n_insert_per_chunk]
                    if len(chunk) > 0:
                        FoF.insert_many(chunk).execute()
                        n_inserts += len(chunk)
            printf("Clusters: %d inserted\n" % (n_inserts))

        if args.add_sf_bounds or args.add_sf_data:
            grnrs = f.read_new("GRNR", 1)  # parent FoF group of each SubFind halo
            if max_fof_id is not None and np.min(grnrs) > max_fof_id:
                printf("Reached max fof %d\n" % max_fof_id)
                break
            ufofs = np.unique(grnrs)
            fof_ids_in_sf = ufofs.tolist()

            if args.add_sf_bounds:
                q1 = ("update FoF set end_subfind_file = {ifile:d} "
                      "where snap_id={snap_id:d} and id_cluster in ({inc:s});\n").format(
                    ifile=ifile, snap_id=snap.id, inc=','.join(map(str, fof_ids_in_sf)))
                q2 = ("update FoF set start_subfind_file = {ifile:d} "
                      "where snap_id={snap_id:d} and id_cluster in ({inc:s}) "
                      "and start_subfind_file=-1;\n").format(
                    ifile=ifile, snap_id=snap.id, inc=','.join(map(str, fof_ids_in_sf)))
                n1 = db.execute_sql(q1)
                n2 = db.execute_sql(q2)
                n_inserts = len(fof_ids_in_sf)
                printf("Updated bounds of (at most) %d clusters.\n" % (n_inserts))

            if args.add_sf_data:
                # insert SubFind halos
                props = {}
                mask = np.in1d(grnrs, ufofs)
                for SF_block in SF_blocks:
                    props[SF_block.lower().replace(" ", "")] = f.read_new(SF_block, 1)[mask]
                props["id_cluster"] = props["grnr"]
                nsfs = len(props["grnr"])
                props["snap_id"] = np.zeros(nsfs) + snap.id
                props["i_file"] = np.zeros(nsfs) + ifile
                lprops = flat_props(props, nsfs, subfind_cols)
                printf("Galaxies: N=%d/%d, from cluster %d to cluster %d\n" %
                       (nsfs, len(grnrs), np.min(props["grnr"]), np.max(props["grnr"])))
                n_inserts = 0
                with db.atomic():
                    n_insert_per_chunk = 15
                    for idx in range(0, len(lprops) + n_insert_per_chunk, n_insert_per_chunk):
                        chunk = lprops[idx:idx + n_insert_per_chunk]
                        if len(chunk) > 0:
                            Galaxy.insert_many(chunk).execute()
                            n_inserts += len(chunk)
                printf("Galaxies: %d inserted\n" % (n_inserts))
        printf("\n")
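# flat_props() is used above but not shown. It has to turn the dict of
# per-column numpy arrays into the list of per-row dicts that peewee's
# insert_many() expects, optionally restricted to the model's columns.
# A minimal sketch (assumption: the real helper may differ; e.g. it likely
# splits vector-valued blocks such as positions into per-component columns,
# which is omitted here):
def flat_props(props, n_rows, cols=None):
    keys = [k for k in props if cols is None or k in cols]
    rows = []
    for i in range(n_rows):
        row = {}
        for k in keys:
            v = props[k][i]
            # convert numpy scalars to plain Python values for the DB driver
            row[k] = v.item() if hasattr(v, 'item') else v
        rows.append(row)
    return rows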
def fof_info(filename, is_snap=False):
    # print("fof_info caching ", filename, is_snap)
    return g.GadgetFile(filename, is_snap)
import numpy as np
import g3read as g

"""
EDIT THIS DATA:
"""
# simcut output:
filename = "Magneticum/Box2_hr/snap_060/22/simcut/7d2726p96wbrmms2/snap_060"
# compute everything within this radius:
cut_radius = 801.
# mean molecular weight:
mu = 0.6

"""
DO NOT EDIT BELOW
"""
f = g.GadgetFile(filename)

# read_new() returns a data structure with all selected blocks (POS, VEL, MASS, TEMP)
# and stacks the data of the chosen particle types (0=gas, 1=dark matter, 4=stars,
# 5=black holes). For instance, all positions are accessed via data["POS "].
# If you need the data separated per particle type, run
#   data = f.read_new(blocks=[...], ptypes=[...], only_joined_ptypes=False)
# and access each type separately, e.g. gas positions via data["POS "][0].
data = f.read_new(blocks=["POS ", "VEL ", "TEMP", "MASS"], ptypes=[0, 1, 2, 3, 4, 5])

# mass-weighted center of the cut region
center = np.average(data["POS "], weights=data["MASS"], axis=0)

# g.to_spherical() returns data whose columns 0,1,2 are rho, theta, phi;
# column 0 (the radius) selects everything within cut_radius of the center
spherical_cut = g.to_spherical(data["POS "], center)[:, 0] < cut_radius
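# The boolean mask can be applied to any of the stacked blocks; as a minimal
# follow-up sketch (reusing the variables above), this sums the mass inside
# the sphere, in Gadget code mass units (conventionally 1e10 Msun/h):
mass_in_sphere = data["MASS"][spherical_cut].sum()
print("mass within %.0f of the center: %e" % (cut_radius, mass_in_sphere))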