def loadTree(run, mrgs=None, loadsave=True, verbose=True):
    """Load tree data from save file if possible, or recalculate directly.

    Arguments
    ---------
    run      : <int>, Illustris run number {1, 3}
    mrgs     : <dict>, (optional=None), BHMerger data, reloaded if not provided
    loadsave : <bool>, (optional=True), try to load tree data from previous save
    verbose  : <bool>, (optional=True), print verbose output

    Returns
    -------
    tree : <dict>, container for tree data - see BHTree doc

    """
    if verbose: print(" - - BHTree.loadTree()")

    fname = constants.GET_BLACKHOLE_TREE_FILENAME(run, VERSION)

    # Reload existing BH Merger Tree
    # ------------------------------
    if loadsave:
        if verbose: print(" - - - Loading save file '{:s}'".format(fname))
        if os.path.exists(fname):
            tree = zio.npzToDict(fname)
            if verbose: print(" - - - - Tree loaded")
        else:
            loadsave = False
            warnStr = "File '%s' does not exist!" % (fname)
            warnings.warn(warnStr, RuntimeWarning)

    # Recreate BH Merger Tree
    # -----------------------
    if not loadsave:
        if verbose: print(" - - - Reconstructing BH Merger Tree")
        # Load Mergers if needed
        if mrgs is None:
            from illpy_lib.illbh import mergers
            mrgs = mergers.load_fixed_mergers(run)
            if verbose: print(" - - - - Loaded {:d} mergers".format(mrgs[MERGERS.NUM]))

        # Construct Tree
        if verbose: print(" - - - - Constructing Tree")
        tree = _constructBHTree(run, mrgs, verbose=verbose)

        # Analyze Tree Data, store meta-data to tree dictionary
        timeBetween, numPast, numFuture = analyzeTree(tree, verbose=verbose)

        # Save Tree data
        zio.dictToNPZ(tree, fname, verbose=True)

    return tree

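# Example usage (a sketch, not part of the module): load the BH merger tree for
# Illustris run 1, falling back to reconstruction if no save file exists.
# Assumes the `illpy_lib` data paths are configured for this machine.
#
#     tree = loadTree(1, loadsave=True, verbose=True)
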
def loadMergerEnvironments(run, loadsave=True, verbose=True, version=_VERSION):
    """Load all subhalo environment data as a dictionary with keys from ``ENVIRON``.

    NOTE: the 'env_in' dictionary was created using `_in_merger_environments()` (i.e.
    manually), and might not be recreated appropriately by `_collectMergerEnvironments()`.

    Arguments
    ---------
    run      <int>  : illustris simulation run number, {1, 3}
    loadsave <bool> : optional, load existing save if it exists, otherwise create new
    verbose  <bool> : optional, print verbose output
    version  <flt>  : optional, version number to load (can only create current version!)

    Returns
    -------
    env <dict> : all environment data for all subhalos, keys given by ``ENVIRON`` class

    """
    if verbose: print(" - - Environments.loadMergerEnvironments()")

    fname_out = _GET_MERGER_ENVIRONMENT_FILENAME(run, version=version)
    fname_in = zio.modify_filename(fname_out, append='_in')

    # Try to Load Existing Save File
    # ------------------------------
    if loadsave:
        if verbose:
            print(" - - Attempting to load saved file from '{}' and '{}'".format(
                fname_out, fname_in))
        if os.path.exists(fname_out):
            env_out = zio.npzToDict(fname_out)
            env_in = zio.npzToDict(fname_in)
            if verbose: print(" - - - Loaded.")
        else:
            print(" - - - File '{}' or '{}' does not exist!".format(fname_out, fname_in))
            loadsave = False

    # Import environment data directly, and save
    # ------------------------------------------
    if not loadsave:
        if verbose:
            print(" - - Importing Merger Environments, version %s" % (str(_VERSION)))
        env_out, env_in = _collectMergerEnvironments(run, verbose=verbose, version=version)
        zio.dictToNPZ(env_out, fname_out, verbose=True)
        zio.dictToNPZ(env_in, fname_in, verbose=True)

    return env_out, env_in

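# Example usage (a sketch): load the 'out' and 'in' merger-environment tables
# for run 1, falling back to a (slow) direct import if no save files exist.
#
#     env_out, env_in = loadMergerEnvironments(1, loadsave=True, verbose=True)
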
def _saveUnique(run, snap, fname, uids, uscales, log):
    """Create, save and return a dictionary of Unique BH Data.

    Arguments
    ---------
    run : int
        Illustris run number {1, 3}
    snap : int or array_like of int
        Illustris snapshot number {1, 135}
    fname : str
        Filename to save to.
    uids : (N,) array_like of int
        Unique ID numbers (NOTE: `N` may be zero).
    uscales : (N,2) array_like of float
        First and last scale-factors of unique BHs (NOTE: `N` may be zero).
    log : ``logging.Logger`` object
        Logging object for output.

    Returns
    -------
    data : dict
        Input data organized into a dictionary with added metadata.

    """
    log.debug("_saveUnique()")
    data = {
        DETAILS.RUN: run,
        DETAILS.SNAP: snap,
        DETAILS.FILE: fname,
        DETAILS.VERSION: __version__,
        DETAILS.CREATED: datetime.now().ctime(),
        DETAILS.IDS: uids.astype(DTYPE.ID),
        DETAILS.SCALES: uscales.astype(DTYPE.SCALAR),
        DETAILS.NUM: uids.size,
    }
    for key, val in list(data.items()):
        data[key] = np.asarray(val)

    zio.dictToNPZ(data, fname, verbose=False, log=log)
    return data

def _in_merger_environments(run, verbose=True, version=_VERSION):
    merger_snaps, snap_mergers, subh_ind_out, subh_ind_in = \
        get_merger_and_subhalo_indices(run, verbose=verbose)
    numMergers = len(merger_snaps)

    # Get all subhalos for each snapshot (including duplicates and missing)
    snap_subh_out = [subh_ind_out[smrg] for smrg in snap_mergers]
    snap_subh_in = [subh_ind_in[smrg] for smrg in snap_mergers]

    sampleSnap = 135
    env_in = _initStorage(run, sampleSnap, snap_subh_out[sampleSnap], numMergers,
                          verbose=verbose, version=version)

    beg = datetime.now()
    pbar = zio.getProgressBar(numMergers)
    pbar.start()
    count = 0
    numGood = 0
    numBad = 0
    # Iterate over each Snapshot
    for snap, (merg, subh_in) in zmath.renumerate(list(zip(snap_mergers, snap_subh_in))):
        # Get indices of valid subhalos
        inds_subh_in = np.where(subh_in >= 0)[0]
        # Skip this snapshot if no valid subhalos
        if inds_subh_in.size == 0 or len(merg) == 0:
            continue
        # Select corresponding merger indices
        inds_in = np.array(merg)[inds_subh_in]

        # Get Data from Group Catalog
        # ---------------------------
        try:
            gcat = Subhalo.importGroupCatalogData(
                run, snap, subhalos=subh_in[inds_subh_in], verbose=False)
        # Count bad, and skip to next snapshot on failure
        except Exception:
            print("gcat import snap {} failed.  {} Mergers.".format(snap, len(merg)))
            numBad += len(merg)
            count += len(merg)
            pbar.update(count)
            continue

        # Extract desired data
        for key in env_in[ENVIRON.GCAT_KEYS]:
            env_in[key][inds_in, ...] = gcat[key][...]

        # Load Each Merger-Subhalo file contents
        # --------------------------------------
        for ind_subh, merger in zip(inds_subh_in, inds_in):
            count += 1
            subhalo = subh_in[ind_subh]
            # Store Subhalo number for each merger
            env_in[ENVIRON.SUBH][merger] = subhalo
            env_in[ENVIRON.SNAP][merger] = snap
            # Set as good merger-environment
            env_in[ENVIRON.STAT][merger] = 1
            numGood += 1

        # Update progressbar
        pbar.update(count)

    pbar.finish()
    end = datetime.now()

    if verbose:
        print(" - - - Completed after %s" % (str(end - beg)))
        print(" - - - Total %5d/%5d = %.4f" % (count, numMergers, count / numMergers))
        print(" - - - Good  %5d/%5d = %.4f" % (numGood, numMergers, numGood / numMergers))
        print(" - - - Bad   %5d/%5d = %.4f" % (numBad, numMergers, numBad / numMergers))

    fname_out = _GET_MERGER_ENVIRONMENT_FILENAME(run, version=version)
    fname_in = zio.modify_filename(fname_out, append='_in')
    print("fname_out = '{}'".format(fname_out))
    print("fname_in  = '{}'".format(fname_in))
    zio.dictToNPZ(env_in, fname_in, verbose=True)
    return env_in

def _loadSingleMergerEnv(run, snap, subhalo, boundID=None, radBins=None,
                         loadsave=True, verbose=False):
    """Import and save merger-subhalo environment data.

    Arguments
    ---------
    run      <int>    : illustris simulation number {1, 3}
    snap     <int>    : illustris snapshot number {0, 135}
    subhalo  <int>    : subhalo index number for this snapshot
    boundID  <int>    : ID of this subhalo's most-bound particle
    radBins  <flt>[N] : optional, positions of radial bins for creating profiles
    loadsave <bool>   : optional, load existing save file if possible
    verbose  <bool>   : optional, print verbose output

    Returns
    -------
    env     <dict> : loaded dictionary of environment data
    retStat <int>  : ``_ENVSTAT`` value for status of this environment

    """
    if verbose: print(" - - Environments._loadSingleMergerEnv()")

    fname = _GET_MERGER_SUBHALO_FILENAME(run, snap, subhalo)
    if verbose: print(" - - - Filename '%s'" % (fname))

    # If we shouldn't or can't load an existing save, reload profiles
    if not loadsave or not os.path.exists(fname):
        # Load Radial Profiles
        radProfs = Profiler.subhaloRadialProfiles(
            run, snap, subhalo, radBins=radBins, mostBound=boundID, verbose=verbose)

        # Invalid profiles on failure
        if radProfs is None:
            warnStr = "INVALID PROFILES at Run %d, Snap %d, Subhalo %d, Bound ID %s" \
                % (run, snap, subhalo, str(boundID))
            warnings.warn(warnStr, RuntimeWarning)
            # Set return status to failure
            retStat = _ENVSTAT.FAIL
            env = None

        # Valid profiles
        else:
            # Unpack data
            outRadBins, posRef, retBoundID, partTypes, partNames, numsBins, \
                massBins, densBins, potsBins, dispBins = radProfs

            if boundID is not None and retBoundID != boundID:
                warnStr = "Run %d, Snap %d, Subhalo %d" % (run, snap, subhalo)
                warnStr += "\nSent BoundID = %d, Returned = %d!" % (boundID, retBoundID)
                warnings.warn(warnStr, RuntimeWarning)

            # Build dict of data
            env = {
                ENVIRON.RUN: run,
                ENVIRON.SNAP: snap,
                ENVIRON.VERS: _VERSION,
                ENVIRON.DATE: datetime.now().ctime(),
                ENVIRON.TYPE: partTypes,
                ENVIRON.NAME: partNames,
                ENVIRON.SUBH: subhalo,
                ENVIRON.BPID: retBoundID,
                ENVIRON.CENT: posRef,
                ENVIRON.RADS: outRadBins,
                ENVIRON.NUMS: numsBins,
                ENVIRON.DENS: densBins,
                ENVIRON.MASS: massBins,
                ENVIRON.POTS: potsBins,
                ENVIRON.DISP: dispBins
            }

            # Save Data as NPZ file
            zio.dictToNPZ(env, fname, verbose=verbose)
            # Set return status to new file created
            retStat = _ENVSTAT.NEWF

    # File already exists
    else:
        # Load data from save file
        env = zio.npzToDict(fname)
        if verbose:
            print(" - - - File already exists for Run %d, Snap %d, Subhalo %d"
                  % (run, snap, subhalo))
        # Set return status to file already exists
        retStat = _ENVSTAT.EXST

    return env, retStat

def _loadSingleSnapshotBHs(run, snapNum, numMergers, idxs, bhids,
                           logger, rank=0, loadsave=True):
    """Load the data for BHs in a single snapshot, save to npz file.

    If no indices (``idxs``) or BH IDs (``bhids``) are given, or this is a 'bad' snapshot,
    then it isn't actually loaded and processed.  An NPZ file with all zero entries is
    still produced.

    Arguments
    ---------
    run : int,
        Illustris simulation number {1, 3}.
    snapNum : int,
        Illustris snapshot number {1, 135}.
    numMergers : int,
        Total number of mergers.
    idxs : (N,) array of int,
        Merger indices for this snapshot with `N` mergers.
    bhids : (N,2,) array of int,
        BH ID numbers, `IN` and `OUT` BH for each merger.
    logger : ``logging.Logger`` object,
        Object for logging.
    rank : int,
        Rank of this processor (used for logging).
    loadsave : bool,
        Load existing save if it exists.

    Returns
    -------
    data : dict,
        Data for this snapshot.
    pos : int,
        Number of BHs successfully found.
    neg : int,
        Number of BHs failed to be found.
    new : int,
        `+1` if a new file was created, otherwise `0`.

    """
    import illpy as ill

    logger.warning("BHSnapshotData._loadSingleSnapshotBHs()")
    illdir = GET_ILLUSTRIS_OUTPUT_DIR(run)
    fname = _GET_BH_SINGLE_SNAPSHOT_FILENAME(run, snapNum)
    logger.warning("Snap %d, filename '%s'" % (snapNum, fname))

    pos = 0
    neg = 0
    new = 0

    # Load and Return existing save if desired
    # ----------------------------------------
    if (loadsave and os.path.exists(fname)):
        logger.warning("Loading existing file")
        data = zio.npzToDict(fname)
        return data, pos, neg, new

    # Initialize dictionary of results
    # --------------------------------
    logger.info("Initializing storage")
    data = _initStorage(numMergers)
    for index, tid in zip(idxs, bhids):
        for BH in [BH_TYPE.IN, BH_TYPE.OUT]:
            data[BH_SNAP.TARGET][index, BH] = tid[BH]

    # Decide if this is a valid Snapshot
    # ----------------------------------
    process_snapshot = True
    # Some illustris-1 snapshots are bad
    if (snapNum in GET_BAD_SNAPS(run)):
        logger.warning("Skipping bad snapshot.")
        process_snapshot = False

    # Make sure there are mergers in this snapshot
    if (len(idxs) <= 0 or len(bhids) <= 0):
        logger.warning("Skipping snap %d with no valid BHs" % (snapNum))
        process_snapshot = False

    # Load And Process Snapshot if it's good
    # --------------------------------------
    if (process_snapshot):
        logger.info("Processing Snapshot")

        # Load Snapshot
        # -------------
        logger.debug("- Loading snapshot %d" % (snapNum))
        with zio.StreamCapture() as output:
            snapshot = ill.snapshot.loadSubset(illdir, snapNum, 'bh',
                                               fields=SNAPSHOT_FIELDS)

        snap_keys = list(snapshot.keys())
        if ('count' in snap_keys):
            snap_keys.remove('count')
        logger.debug("- - Loaded %d particles" % (snapshot['count']))

        # Make sure all target keys are present
        union = list(set(snap_keys) & set(SNAPSHOT_FIELDS))
        if (len(union) != len(SNAPSHOT_FIELDS)):
            logger.error("snap_keys       = '%s'" % (str(snap_keys)))
            logger.error("SNAPSHOT_FIELDS = '%s'" % (str(SNAPSHOT_FIELDS)))
            errStr = "Field mismatch at Rank %d, Snap %d!" % (rank, snapNum)
            zio._mpiError(comm, log=logger, err=errStr)

        # Match target BHs
        # ----------------
        logger.debug("- Matching %d BH Mergers" % (len(bhids)))
        for index, tid in zip(idxs, bhids):
            for BH in [BH_TYPE.IN, BH_TYPE.OUT]:
                ind = np.where(snapshot['ParticleIDs'] == tid[BH])[0]
                if (len(ind) == 1):
                    pos += 1
                    data[BH_SNAP.VALID][index, BH] = True
                    for key in SNAPSHOT_FIELDS:
                        data[key][index, BH] = snapshot[key][ind[0]]
                else:
                    neg += 1

        logger.debug("- Processed, pos %d, neg %d" % (pos, neg))

    # Add Metadata and Save File
    # ==========================
    logger.debug("Adding metadata to dictionary")
    data[BH_SNAP.RUN] = run
    data[BH_SNAP.SNAP] = snapNum
    data[BH_SNAP.VERSION] = _VERSION
    data[BH_SNAP.CREATED] = datetime.now().ctime()
    data[BH_SNAP.DIR_SRC] = illdir
    data[BH_SNAP.FIELDS] = SNAPSHOT_FIELDS
    data[BH_SNAP.DTYPES] = SNAPSHOT_DTYPES

    logger.info("Saving data to '%s'" % (fname))
    zio.dictToNPZ(data, fname)
    new = 1

    return data, pos, neg, new

def loadBHSnapshotData(run, version=None, loadsave=True, verbose=False, logger=None):
    """Load an existing BH Snapshot data save file, or attempt to recreate it (slow!).

    If the data is recreated (using ``_mergeBHSnapshotFiles``), it will be saved to an npz
    file.  The loaded parameters are stored to a dictionary with keys given by the
    parameters in ``bh_constants.BH_SNAP``.

    Arguments
    ---------
    run : int,
        Illustris run number {1, 3}.
    version : flt,
        Version number to load/save.
    loadsave : bool,
        If `True`, attempt to load an existing save.
    verbose : bool,
        Print verbose output.
    logger : ``logging.Logger`` object,
        Object to use for logging output messages.

    Returns
    -------
    data : dict,
        Dictionary of BH Snapshot data.  Keys are given by the entries to
        ``bh_constants.BH_SNAP``.

    """
    # Create default logger if needed
    # -------------------------------
    if (not isinstance(logger, logging.Logger)):
        logger = zio.default_logger(logger, verbose=verbose)

    logger.debug("BHSnapshotData.loadBHSnapshotData()")
    if (version is None):
        version = _VERSION

    oldVers = False
    # Warn if attempting to use an old version number
    if (version != _VERSION):
        oldVers = True
        logger.warning("WARNING: loading v%.2f behind current v%.2f" % (version, _VERSION))

    # Get save filename
    fname = _GET_BH_SNAPSHOT_FILENAME(run, version=version)

    # Load Existing File
    # ------------------
    if (loadsave):
        logger.info("Loading from '%s'" % (fname))
        if (os.path.exists(fname)):
            data = zio.npzToDict(fname)
        else:
            logger.warning("WARNING: '%s' does not exist!  Recreating!" % (fname))
            loadsave = False

    # Recreate data (Merge individual snapshot files)
    # -----------------------------------------------
    if (not loadsave):
        logger.info("Recreating '%s'" % (fname))
        # Don't allow old versions to be recreated
        if (oldVers):
            raise RuntimeError("Cannot recreate outdated version %.2f!!" % (version))

        data = _mergeBHSnapshotFiles(run, logger)

        # Add Metadata
        logger.debug("Adding metadata")
        data[BH_SNAP.RUN] = run
        data[BH_SNAP.VERSION] = _VERSION
        data[BH_SNAP.CREATED] = datetime.now().ctime()
        data[BH_SNAP.FIELDS] = SNAPSHOT_FIELDS
        data[BH_SNAP.DTYPES] = SNAPSHOT_DTYPES

        # Save
        logger.debug("Saving")
        zio.dictToNPZ(data, fname)
        logger.info("Saved to '%s'" % (fname))

    return data

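# Example usage (a sketch): load the merged BH snapshot data for run 1 with a
# default logger; note that recreating the file via `_mergeBHSnapshotFiles` is
# slow, so an existing save is preferred.
#
#     data = loadBHSnapshotData(1, verbose=True)
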
def main():
    """Calculate distribution functions for all galaxies in parallel.

    Runs `_runMaster` on process 0, which communicates with, and distributes jobs to,
    each of the `_runSlave` processes on all the other processors.
    """
    # Initialize MPI Parameters
    # -------------------------
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    if size <= 1:
        raise RuntimeError("Not setup for serial runs!")

    sets = settings.Settings()
    mstar = sets.MSTAR * MSOL

    if rank == 0:
        NAME = sys.argv[0]
        print("\n%s\n%s\n%s" % (NAME, '=' * len(NAME), str(datetime.now())))
        zio.check_path(sets.GET_DIR_LOGS())

    # Make sure log-path is setup before continuing
    comm.Barrier()

    # Parse Arguments
    # ---------------
    args = _parseArguments(sets)
    run = args.run
    verbose = args.verbose
    smooth = args.smooth
    relAcc = args.relAcc
    intSteps = args.intSteps

    # Load logger
    log = _loadMPILogger(rank, verbose, sets)
    if rank < 2:
        print("Log (Rank %d) filename '%s'" % (rank, log.filename))

    # Log runtime parameters
    log.info("run           = %d" % (run))
    log.info("version       = %s" % (__version__))
    log.info("MPI comm size = %d" % (size))
    log.info("Rank          = %d" % (rank))
    log.info("")
    log.info("verbose       = %s" % (str(verbose)))
    log.info("smooth        = %d" % (smooth))
    log.info("relAcc        = %e" % (relAcc))
    log.info("intSteps      = %d" % (intSteps))
    log.info("")

    # Master Process
    # --------------
    if rank == 0:
        beg_all = datetime.now()
        try:
            log.info("Running Master")
            eps, ndens, ndD1, ndD2, dist_funcs, dfErrs, recDens = _runMaster(run, comm, log)
        except Exception as err:
            _mpiError(comm, log, err)

        end_all = datetime.now()
        log.debug("Done after '%s'" % (str(end_all - beg_all)))

        fname = sets.GET_DIST_FUNC_FILENAME(run=run, vers=__version__)
        zio.check_path(fname)

        data = {}
        data['run'] = run
        data['eps'] = eps
        data['ndens'] = ndens
        data['ndD1'] = ndD1
        data['ndD2'] = ndD2
        data['distfuncs'] = dist_funcs
        data['dferrs'] = dfErrs
        data['recdens'] = recDens
        data['version'] = __version__

        zio.dictToNPZ(data, fname, verbose=True)
        log.info("Saved data to '%s'" % (fname))

    # Slave Processes
    # ---------------
    else:
        try:
            log.info("Running slave")
            _runSlave(comm, smooth, relAcc, intSteps, mstar, log)
        except Exception as err:
            _mpiError(comm, log, err)

    log.info("Done.")
    return

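# `main()` requires more than one MPI process (serial runs raise an error).  A
# typical invocation might look like the following; the script name and flag
# names here are assumptions based on the `_parseArguments` fields, so check
# the module's argparse setup for the real ones:
#
#     mpirun -np 8 python distfuncs.py --run 1 --verbose
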
def loadBHHostsSnap(run, snap, version=None, loadsave=True, verbose=True, bar=None,
                    convert=None):
    """Load pre-existing, or manage the creation of, the particle offset table.

    Arguments
    ---------
    run      <int>  : illustris simulation number {1, 3}
    snap     <int>  : illustris snapshot number {1, 135}
    version  <flt>  : optional, version to load (only the current version can be created)
    loadsave <bool> : optional, load existing table
    verbose  <bool> : optional, print verbose output
    bar      <bool> : optional, show a progress bar; defaults to the value of `verbose`
    convert  <flt>  : optional, version of an existing (full) offset table to convert

    Returns
    -------
    offsetTable <dict> : particle offset table, see `ParticleHosts` docs for more info.

    """
    if verbose: print(" - - ParticleHosts.loadBHHostsSnap()")

    if (bar is None): bar = bool(verbose)

    # Load Existing Save
    # ==================
    if (loadsave):
        saveFile = _GET_BH_HOSTS_SNAP_TABLE_FILENAME(run, snap, version)
        if verbose: print(" - - - Loading from save '{:s}'".format(saveFile))
        # Make sure path exists
        if (os.path.exists(saveFile)):
            hostTable = zio.npzToDict(saveFile)
            if verbose: print(" - - - - Table loaded")
        else:
            if verbose: print(" - - - - File does not exist, reconstructing BH Hosts")
            loadsave = False

    # Reconstruct Hosts Table
    # =======================
    if (not loadsave):
        if verbose: print(" - - - Constructing Offset Table")
        start = datetime.now()

        if (version is not None):
            raise RuntimeError("Can only create version '%s'" % _VERSION)

        saveFile = _GET_BH_HOSTS_SNAP_TABLE_FILENAME(run, snap)

        offsetFile = ''
        if (convert is not None):
            offsetFile = _GET_OFFSET_TABLE_FILENAME(run, snap, version=convert)
            if verbose:
                print(" - - - Trying to convert from existing '{:s}'".format(offsetFile))

        # Convert an Existing (Full) Offset Table into BH Hosts
        # -----------------------------------------------------
        if os.path.exists(offsetFile):
            offsetTable = zio.npzToDict(offsetFile)
            bhInds = offsetTable[OFFTAB.BH_INDICES]
            bhIDs = offsetTable[OFFTAB.BH_IDS]
            bhHalos = offsetTable[OFFTAB.BH_HALOS]
            bhSubhs = offsetTable[OFFTAB.BH_SUBHALOS]
        else:
            if verbose: print(" - - - Reconstructing offset table")
            # Construct Offset Data
            haloNums, subhNums, offsets = _constructOffsetTable(run, snap, verbose=verbose)

            # Construct BH index Data, catching errors for bad snapshots
            try:
                bhInds, bhIDs = _constructBHIndexTable(run, snap, verbose=verbose)
            except Exception:
                # If this is a known bad snapshot, set values to None
                if (snap in GET_BAD_SNAPS(run)):
                    if verbose:
                        print(" - - - BAD SNAPSHOT: RUN {:d}, Snap {:d}".format(run, snap))
                    bhInds = None
                    bhIDs = None
                    bhHalos = None
                    bhSubhs = None
                # If this is not a known problem, still raise error
                else:
                    print("this is not a known bad snapshot: run {:d}, snap {:d}".format(
                        run, snap))
                    raise
            # On success, Find BH Subhalos
            else:
                binInds = np.digitize(bhInds, offsets[:, PARTICLE.BH]).astype(DTYPE.INDEX) - 1
                if (any(binInds < 0)):
                    bads = np.where(binInds < 0)[0]
                    raise RuntimeError("Some bhInds not matched!!  '%s'" % (str(bads)))

                bhHalos = haloNums[binInds]
                bhSubhs = subhNums[binInds]

        # Save To Dict
        # ------------
        hostTable = {}
        # Metadata
        hostTable[OFFTAB.RUN] = run
        hostTable[OFFTAB.SNAP] = snap
        hostTable[OFFTAB.VERSION] = _VERSION
        hostTable[OFFTAB.CREATED] = datetime.now().ctime()
        hostTable[OFFTAB.FILENAME] = saveFile
        # BH Data
        hostTable[OFFTAB.BH_INDICES] = bhInds
        hostTable[OFFTAB.BH_IDS] = bhIDs
        hostTable[OFFTAB.BH_HALOS] = bhHalos
        hostTable[OFFTAB.BH_SUBHALOS] = bhSubhs

        # Save to file
        zio.dictToNPZ(hostTable, saveFile, verbose=verbose)

        stop = datetime.now()
        if verbose: print(" - - - - Done after {:s}".format(str(stop - start)))

    return hostTable

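# Example usage (a sketch): load (or rebuild) the BH host table for run 1,
# snapshot 135.
#
#     hostTable = loadBHHostsSnap(1, 135, verbose=True)
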
def loadBHHosts(run, loadsave=True, version=None, verbose=True, bar=None, convert=None):
    """Merge individual snapshots' blackhole hosts files into a single file.

    Arguments
    ---------
    run      <int>  : illustris simulation number {1, 3}
    loadsave <bool> : optional, load existing save if possible
    version  <flt>  : optional, target version number
    verbose  <bool> : optional, print verbose output
    bar      <bool> : optional, show a progress bar; defaults to the value of `verbose`
    convert  <flt>  : optional, offset-table version passed through to `loadBHHostsSnap`

    Returns
    -------
    bhHosts <dict> : table of hosts for all snapshots

    """
    if verbose: print(" - - ParticleHosts.loadBHHosts()")

    if (bar is None): bar = bool(verbose)

    # Load Existing Save
    # ==================
    if (loadsave):
        saveFile = _GET_BH_HOSTS_TABLE_FILENAME(run, version=version)
        if verbose: print(" - - - Loading from save '{:s}'".format(saveFile))
        # Make sure path exists
        if (os.path.exists(saveFile)):
            bhHosts = zio.npzToDict(saveFile)
            if verbose: print(" - - - - Table loaded")
        else:
            if verbose: print(" - - - - File does not exist, reconstructing BH Hosts")
            loadsave = False

    # Reconstruct Hosts Table
    # =======================
    if (not loadsave):
        if verbose: print(" - - - Constructing Hosts Table")
        start = datetime.now()

        if (version is not None):
            raise RuntimeError("Can only create version '%s'" % _VERSION)

        saveFile = _GET_BH_HOSTS_TABLE_FILENAME(run)

        # Create progress-bar
        pbar = zio.getProgressBar(NUM_SNAPS)
        if bar: pbar.start()

        # Select the dict-keys for snapshot hosts to transfer
        hostKeys = [OFFTAB.BH_IDS, OFFTAB.BH_INDICES, OFFTAB.BH_HALOS, OFFTAB.BH_SUBHALOS]

        # Create dictionary
        # -----------------
        bhHosts = {}
        # Add metadata
        bhHosts[OFFTAB.RUN] = run
        bhHosts[OFFTAB.VERSION] = _VERSION
        bhHosts[OFFTAB.CREATED] = datetime.now().ctime()
        bhHosts[OFFTAB.FILENAME] = saveFile

        # Load All BH-Hosts Files
        # -----------------------
        for snap in range(NUM_SNAPS):
            # Load Snapshot BH-Hosts
            hdict = loadBHHostsSnap(run, snap, loadsave=True, verbose=True, convert=convert)
            # Extract and store target data
            snapStr = OFFTAB.snapDictKey(snap)
            bhHosts[snapStr] = {hkey: hdict[hkey] for hkey in hostKeys}
            if bar: pbar.update(snap)

        if bar: pbar.finish()

        # Save to file
        zio.dictToNPZ(bhHosts, saveFile, verbose=verbose)

        stop = datetime.now()
        if verbose: print(" - - - - Done after %s" % (str(stop - start)))

    return bhHosts

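# Example usage (a sketch): build (or load) the combined hosts table for run 1,
# then look up the host data for a single snapshot via its dict-key.
#
#     bhHosts = loadBHHosts(1)
#     snap_hosts = bhHosts[OFFTAB.snapDictKey(135)]
#     subhalos = snap_hosts[OFFTAB.BH_SUBHALOS]
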