def dump_output(self, *print_properties):
    """Print output based on the level of detail required.

    At the default (low) detail level, only the integrator's calc's
    update properties are written for each named particle array.  At
    the higher detail level, all particle array properties are dumped.
    """
    fname = self.fname + '_'
    props = {}

    cell_size = self.particles.cell_manager.cell_size

    for pa in self.particles.arrays:
        name = pa.name
        _fname = os.path.join(self.output_directory,
                              fname + name + '_' + str(self.t) + '.npz')

        if self.detailed_output:
            savez(_fname, dt=self.dt, **pa.properties)
        else:
            for prop in print_properties:
                props[prop] = pa.get(prop)

            savez(_fname, dt=self.dt, cell_size=cell_size,
                  np=pa.num_real_particles, **props)
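# A minimal sketch of reading back one of the per-array files written by
# dump_output above, assuming savez produces a standard NumPy .npz archive.
# The directory, base name 'foo', array name 'fluid', time stamp and the
# property 'x' are hypothetical placeholders.
import numpy

data = numpy.load('output/foo_fluid_0.0.npz')
dt = data['dt']        # time step saved with every file
n = data['np']         # number of real particles (low detail output only)
x = data['x']          # present only if 'x' was among the dumped properties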
def save_current_genealogy(self):
    savefpath = os.path.join(self._logdir,
                             f'genealogy_gen{self._istep:03d}.npz')
    save_kwargs = {'image_ids': np.array(self._curr_sample_ids, dtype=str),
                   'genealogy': np.array(self._genealogy, dtype=str)}
    utils.savez(savefpath, save_kwargs)
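# A minimal sketch of reading a genealogy checkpoint written by
# save_current_genealogy above, assuming utils.savez stores the entries of
# the passed dict as named arrays in a standard .npz archive.  The log
# directory and generation number in the path are hypothetical.
import numpy as np

ckpt = np.load('logs/genealogy_gen012.npz')
image_ids = ckpt['image_ids']   # image id strings for the current generation
genealogy = ckpt['genealogy']   # lineage strings, aligned with image_ids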
            ImageidTable[stepi][0:len(score_tmp)] = image_ids
        except FileNotFoundError:
            if stepi == 0:
                startnum += 1
                steps += 1
                continue
            else:
                print("maximum steps is %d." % stepi)
                ScoreEvolveTable = ScoreEvolveTable[0:stepi, :]
                ImageidTable = ImageidTable[0:stepi]
                steps = stepi
                break
    ImageidTable = np.asarray(ImageidTable)
    utils.savez(os.path.join(CurDataDir, "scores_summary_table.npz"),
                {"ScoreEvolveTable": ScoreEvolveTable,
                 "ImageidTable": ImageidTable})

#%% Filter the samples whose score falls in a given range
def select_image(CurDataDir, lb=200, ub=None):
    fncatalog = os.listdir(CurDataDir)
    # scores_summary automatically reads an existing summary or generates one.
    ScoreEvolveTable, ImageidTable = utils.scores_summary(CurDataDir)
    if ub is None:
        ub = np.nanmax(ScoreEvolveTable) + 1
    if lb is None:
        lb = np.nanmin(ScoreEvolveTable) - 1
    imgid_list = ImageidTable[np.logical_and(ScoreEvolveTable > lb,
                                             ScoreEvolveTable < ub)]
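# Hypothetical usage of the helpers above on one experiment directory.
# 'exp_data/run_000/' is a placeholder path, and capturing a return value
# from select_image assumes the (truncated) function goes on to return the
# selected image id list.
CurDataDir = "exp_data/run_000/"
ScoreEvolveTable, ImageidTable = utils.scores_summary(CurDataDir)
print("score range:", np.nanmin(ScoreEvolveTable), np.nanmax(ScoreEvolveTable))
selected = select_image(CurDataDir, lb=200)   # ids of images scoring above 200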
def dump_output(self, dt, *print_properties):
    """Print output based on the level of detail required.

    At the default (low) detail level, only the integrator's calc's
    update properties are written for each named particle array.  At
    the higher detail level, all particle array properties are dumped.

    Format:
    -------
    A single file named as: <fname>_<rank>_<count>.npz

    The output file contains the following fields:

    solver_data : Solver related data like time step, time and
    iteration count.  These are used to resume a simulation.

    arrays : A dictionary keyed on particle array names, with the
    particle properties as values.

    version : The version number for this format of file output.
    The current version number is 1.

    Example:
    --------
    data = load('foo.npz')

    version = data['version']

    dt = data['solver_data']['dt']
    t = data['solver_data']['t']

    array = data['arrays'][array_name].astype(object)
    array['x']

    """
    if self.with_cl:
        self.particles.read_from_buffer()

    fname = self.fname + '_'
    props = {"arrays": {}, "solver_data": {}}

    cell_size = None
    if not self.with_cl:
        cell_size = self.particles.cell_manager.cell_size

    _fname = os.path.join(self.output_directory,
                          fname + str(self.count) + '.npz')

    if self.detailed_output:
        for array in self.particles.arrays:
            props["arrays"][array.name] = array.get_property_arrays(all=True)
    else:
        for array in self.particles.arrays:
            props["arrays"][array.name] = array.get_property_arrays(all=False)

    # Add the solver data
    props["solver_data"]["dt"] = dt
    props["solver_data"]["t"] = self.t
    props["solver_data"]["count"] = self.count

    if self.parallel_output_mode == "collected" and self.in_parallel:
        comm = self.comm
        arrays = props["arrays"]
        numarrays = len(arrays)
        array_names = arrays.keys()

        # gather the data from all processors
        collected_data = comm.gather(arrays, root=0)

        if self.rank == 0:
            props["arrays"] = {}
            size = comm.Get_size()

            # concatenate the arrays
            for array_name in array_names:
                props["arrays"][array_name] = {}

                _props = collected_data[0][array_name].keys()
                for prop in _props:
                    prop_arr = numpy.concatenate(
                        [collected_data[pid][array_name][prop]
                         for pid in range(size)])

                    props["arrays"][array_name][prop] = prop_arr

            savez(_fname, version=1, **props)

    else:
        savez(_fname, version=1, **props)
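# A minimal sketch of reading a version-1 file written by dump_output above
# with plain NumPy, assuming savez behaves like numpy.savez: the nested
# solver_data / arrays dicts are then stored as 0-d object arrays, so .item()
# is needed to get the dicts back.  The file name is hypothetical; the load()
# helper used in the docstring example hides this conversion step.
import numpy

data = numpy.load('foo_10.npz', allow_pickle=True)
print('format version:', data['version'])

solver_data = data['solver_data'].item()
print('t =', solver_data['t'], 'dt =', solver_data['dt'])

arrays = data['arrays'].item()
for array_name, properties in arrays.items():
    print(array_name, sorted(properties))   # list the dumped property names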