def do_plot(self, subcmd, opts, arg):
    """
    Create a set of images

    ${cmd_usage}
    ${cmd_option_list}
    """
    pf = _fix_pf(arg)
    # Resolve the plot center: the sentinel (-1, -1, -1) means "seek the
    # density maximum in the dataset".
    center = opts.center
    if opts.center == (-1, -1, -1):
        mylog.info("No center fed in; seeking.")
        v, center = pf.h.find_max("Density")
    center = na.array(center)
    pc = raven.PlotCollection(pf, center=center)
    # Axis 4 is shorthand for "all three axes".
    if opts.axis == 4:
        axis_list = range(3)
    else:
        axis_list = [opts.axis]
    for axis in axis_list:
        mylog.info("Adding plot for axis %i", axis)
        if opts.projection:
            pc.add_projection(opts.field, axis,
                              weight_field=opts.weight, center=center)
        else:
            pc.add_slice(opts.field, axis, center=center)
        # Optionally overlay grid boundaries on the plot just added.
        if opts.grids:
            pc.plots[-1].modify["grids"]()
    pc.set_width(opts.width, opts.unit)
    pc.set_cmap(opts.cmap)
    if opts.zlim:
        pc.set_zlim(*opts.zlim)
    # Ensure the output directory exists before saving.
    if not os.path.isdir(opts.output):
        os.makedirs(opts.output)
    pc.save(os.path.join(opts.output, "%s" % (pf)))
def run_func(pf, *args, **kwargs):
    """
    Invoke the wrapped ``func`` on *pf* after normalizing it to a
    parameter file, defaulting the center to the density maximum.
    """
    PlotTypes.Initialize()
    real_pf = _fix_pf(pf)
    # If the caller did not specify a center, use the density peak.
    if "center" not in kwargs:
        kwargs['center'] = real_pf.h.find_max("Density")[1]
    plot = func(real_pf, *args, **kwargs)
    # Stash a strong reference to the (possibly re-opened) parameter file
    # on the plot so it stays alive until the plot object is deleted.
    if real_pf is not pf:
        plot["pf"] = real_pf
    return plot
def do_hop(self, subcmd, opts, arg):
    """
    Run HOP on one or more datasets

    ${cmd_option_list}
    """
    pf = _fix_pf(arg)
    # Only forward the threshold if one was explicitly supplied.
    finder_args = {'dm_only': opts.dm_only}
    if opts.threshold is not None:
        finder_args['threshold'] = opts.threshold
    halos = HaloFinder(pf, **finder_args)
    # Default output name is "<dataset>.hop" unless one was given.
    if opts.output is None:
        out_name = "%s.hop" % pf
    else:
        out_name = opts.output
    halos.write_out(out_name)
def do_stats(self, subcmd, opts, arg): """ Print stats and maximum density for one or more datasets ${cmd_option_list} """ pf = _fix_pf(arg) pf.h.print_stats() v, c = pf.h.find_max("Density") print "Maximum density: %0.5e at %s" % (v, c) if opts.output is not None: t = pf["InitialTime"] * pf['years'] open(opts.output, "a").write("%s (%0.5e years): %0.5e at %s\n" % (pf, t, v, c))
def do_zoomin(self, subcmd, opts, arg):
    """
    Create a set of zoomin frames

    ${cmd_option_list}
    """
    pf = _fix_pf(arg)
    # Smallest width we can resolve: a multiple of the finest cell size.
    min_width = opts.min_width * pf.h.get_smallest_dx()
    # Axis 4 is shorthand for "all three axes".
    if opts.axis == 4:
        axes = range(3)
    else:
        axes = [opts.axis]
    pc = PlotCollection(pf)
    for ax in axes:
        if opts.projection:
            p = pc.add_projection(opts.field, ax, weight_field=opts.weight)
        else:
            p = pc.add_slice(opts.field, ax)
        if opts.unit_boxes:
            p.modify["units"](factor=8)
        if opts.text is not None:
            # Turn literal "\n" sequences in the CLI argument into newlines.
            p.modify["text"]((0.02, 0.05), opts.text.replace(r"\n", "\n"),
                             text_args=dict(size="medium", color="w"))
    # Start zoomed all the way out.
    pc.set_width(opts.max_width, '1')
    # Check the output directory
    if not os.path.isdir(opts.output):
        os.mkdir(opts.output)
    # Figure out our per-frame zoom factor.
    # Recall that factor^nframes = min_width / max_width,
    # so factor = (min_width / max_width)^(1/nframes),
    # computed below as 10**(log10(min/max)/nframes).
    mylog.info("min_width: %0.3e max_width: %0.3e nframes: %0.3e",
               min_width, opts.max_width, opts.nframes)
    factor = 10**(math.log10(min_width / opts.max_width) / opts.nframes)
    mylog.info("Zoom factor: %0.3e", factor)
    w = 1.0
    for i in range(opts.nframes):
        mylog.info("Setting width to %0.3e", w)
        mylog.info("Saving frame %06i", i)
        pc.set_width(w, "1")
        if opts.zlim:
            pc.set_zlim(*opts.zlim)
        if opts.dex:
            # Clamp the colorbar to `dex` decades above the field minimum.
            pc.set_zlim('min', None, opts.dex)
        pc.set_cmap(opts.cmap)
        pc.save(os.path.join(opts.output, "%s_frame%06i" % (pf, i)))
        # NOTE(review): since factor**0 == 1, frames 0 and 1 both render at
        # width 1.0 and the final frame stops one step short of min_width.
        # `w = factor**(i + 1)` (equivalently `w *= factor`) would advance
        # every frame — confirm intent before changing.
        w = factor**i
def do_amira(self, subcmd, opts, start, stop):
    """
    Export multiple data sets in amira format

    ${cmd_usage}
    ${cmd_option_list}
    """
    from yt.extensions.HierarchySubset import ExtractedHierarchy
    import h5py
    # Inclusive timestep range to export.
    first = int(start)
    last = int(stop)
    # Set up our global metadata
    afile = h5py.File(opts.output, "w")
    md = afile.create_group("/globalMetaData")
    md.attrs['datatype'] = 0
    md.attrs['staggering'] = 1
    md.attrs['fieldtype'] = 1
    md.attrs['minTimeStep'] = first
    md.attrs['maxTimeStep'] = last
    times = []
    # Get our staggering correct based on skip
    timesteps = na.arange(first, last + 1, opts.skip, dtype='int32')
    # NOTE(review): `time_offset` and `t2` are written but never read in
    # this function — likely vestigial; confirm before removing.
    time_offset = None
    t2 = []
    offset = None
    if opts.recenter:
        # Recenter on the density peak of the *last* timestep so all
        # exported frames share one offset.
        tpf = _fix_pf("%s%04i" % (opts.basename, timesteps[-1]))
        offset = tpf.h.find_max("Density")[1]
        del tpf
    for n in timesteps:
        # Try super hard to get the right parameter file
        pf = _fix_pf("%s%04i" % (opts.basename, n))
        hh = pf.h
        times.append(pf["InitialTime"] * pf["years"])
        eh = ExtractedHierarchy(pf, opts.min_level, max_level=opts.max_level,
                                offset=offset, always_copy=opts.always_copy)
        eh.export_output(afile, n, opts.field)
        t2.append(pf["InitialTime"])
    # This should be the same
    # (root-cell size computed from the last pf; presumably identical for
    # every timestep — TODO confirm.)
    md.attrs['rootDelta'] = (pf["unitary"] /
                             pf["TopGridDimensions"]).astype('float64')
    md.attrs['minTime'] = times[0]
    md.attrs['maxTime'] = times[-1]
    md.attrs['numTimeSteps'] = len(timesteps)
    # I think we just want one value here
    # `subtract_time` shifts times so the first frame is t=0.
    rel_times = na.array(
        times, dtype='float64') - int(opts.subtract_time) * times[0]
    md.create_dataset("sorted_times", data=na.array(rel_times))
    md.create_dataset("sorted_timesteps", data=timesteps)
    afile.close()