def __init__(
    self,
    filename,
    auxiliary_files=None,
    nprocs=None,
    storage_filename=None,
    nan_mask=None,
    spectral_factor=1.0,
    suppress_astropy_warnings=True,
    parameters=None,
    units_override=None,
    unit_system="cgs",
    z_axis_decomp=None,
):
    if auxiliary_files is None:
        auxiliary_files = []
    self.spectral_factor = spectral_factor
    if z_axis_decomp is not None:
        issue_deprecation_warning(
            "The 'z_axis_decomp' argument is deprecated, "
            "as this decomposition is now performed for "
            "spectral-cube FITS datasets automatically."
        )
    super(SpectralCubeFITSDataset, self).__init__(
        filename,
        nprocs=nprocs,
        auxiliary_files=auxiliary_files,
        storage_filename=storage_filename,
        suppress_astropy_warnings=suppress_astropy_warnings,
        nan_mask=nan_mask,
        parameters=parameters,
        units_override=units_override,
        unit_system=unit_system,
    )
def writeto(self, fileobj, fields=None, overwrite=False, **kwargs):
    r"""
    Write all of the fields or a subset of them to a FITS file.

    Parameters
    ----------
    fileobj : string
        The name of the file to write to.
    fields : list of strings, optional
        The fields to write to the file. If not specified
        all of the fields in the buffer will be written.
    overwrite : boolean, optional
        Whether or not to overwrite a previously existing file.
        Default: False
    **kwargs
        Additional keyword arguments are passed to
        :meth:`~astropy.io.fits.HDUList.writeto`.
    """
    if "clobber" in kwargs:
        issue_deprecation_warning(
            'The "clobber" keyword argument '
            'is deprecated. Use the "overwrite" '
            "argument, which has the same effect, "
            "instead."
        )
        overwrite = kwargs.pop("clobber")
    if fields is None:
        hdus = self.hdulist
    else:
        hdus = _astropy.pyfits.HDUList()
        for field in fields:
            hdus.append(self.hdulist[field])
    hdus.writeto(fileobj, overwrite=overwrite, **kwargs)
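# A minimal, self-contained sketch of the "clobber" -> "overwrite" shim used
# above (and again in apply_to_stream, write_to_gdf, and _create_new_gdf
# below). warnings.warn stands in for yt's issue_deprecation_warning, and
# save() is a hypothetical function, not yt API.
import warnings


def save(path, overwrite=False, **kwargs):
    if "clobber" in kwargs:
        warnings.warn(
            'The "clobber" keyword argument is deprecated; '
            'use "overwrite" instead.',
            DeprecationWarning,
        )
        overwrite = kwargs.pop("clobber")
    return path, overwrite


# Old call sites keep working but now emit a DeprecationWarning:
assert save("out.fits", clobber=True) == ("out.fits", True)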
def _dep_field(field, data):
    # Closure: alias_species, species, suffix, and ftype are bound in the
    # enclosing scope where this deprecated alias field is registered.
    if not isinstance(data, FieldDetector):
        issue_deprecation_warning(
            'The "%s_%s" field is deprecated. '
            'Please use "%s_%s" instead.'
            % (alias_species, suffix, species, suffix)
        )
    return data[ftype, "%s_%s" % (species, suffix)]
def _check_deprecated_parameters():
    from yt.config import ytcfg
    from yt.funcs import issue_deprecation_warning

    if ytcfg.getboolean("yt", "loadfieldplugins"):
        issue_deprecation_warning(
            "Found deprecated parameter 'loadfieldplugins' in yt rcfile."
        )
def __init__(self, filename, args=None, kwargs=None):
    super(YTUnidentifiedDataType, self).__init__(filename, args, kwargs)
    # This cannot be imported at the module level (creates circular imports).
    from yt.funcs import issue_deprecation_warning

    issue_deprecation_warning(
        "YTOutputNotIdentified is a deprecated alias for YTUnidentifiedDataType"
    )
def get_lowest_clumps(clump, clump_list=None):
    "Return a list of all clumps at the bottom of the index."
    issue_deprecation_warning(
        "This function has been deprecated in favor of accessing a "
        "clump's leaf nodes via 'clump.leaves'."
    )
    return clump.leaves
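# Hedged sketch of the "leaves" idea the deprecation above points to: a
# minimal tree node whose .leaves property collects the bottom-of-index
# nodes. Node is a stand-in, not yt's Clump class.
class Node:
    def __init__(self, children=None):
        self.children = children or []

    @property
    def leaves(self):
        # A node with no children is itself a leaf.
        if not self.children:
            return [self]
        return [leaf for c in self.children for leaf in c.leaves]


root = Node([Node(), Node([Node()])])
assert len(root.leaves) == 2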
def _particle_velocity_relative(field, data):
    if not isinstance(data, FieldDetector):
        issue_deprecation_warning(
            "The 'particle_velocity_relative' field has been deprecated in "
            "favor of 'relative_particle_velocity'."
        )
    if isinstance(field.name, tuple):
        return data[field.name[0], "relative_particle_velocity"]
    else:
        return data["relative_particle_velocity"]
def simulation(fn, simulation_type, find_outputs=False):
    from yt.funcs import issue_deprecation_warning

    issue_deprecation_warning(
        "yt.simulation is a deprecated alias for yt.load_simulation "
        "and will be removed in a future version of yt."
    )
    return load_simulation(
        fn=fn, simulation_type=simulation_type, find_outputs=find_outputs
    )
def __init__(self, pw):
    try:
        # Attempt import from the old standalone WCSAxes package first
        from wcsaxes import WCSAxes

        issue_deprecation_warning(
            "Support for the standalone 'wcsaxes' "
            "package is deprecated since its "
            "functionality has been merged into "
            "AstroPy, and will be removed in a "
            "future release. It is recommended to "
            "use the version bundled with AstroPy "
            ">= 1.3."
        )
    except ImportError:
        # Fall back to the AstroPy version
        WCSAxes = _astropy.wcsaxes.WCSAxes
    if pw.oblique:
        raise NotImplementedError("WCS axes are not implemented for oblique plots.")
    if not hasattr(pw.ds, "wcs_2d"):
        raise NotImplementedError("WCS axes are not implemented for this dataset.")
    if pw.data_source.axis != pw.ds.spec_axis:
        raise NotImplementedError("WCS axes are not implemented for this axis.")
    self.plots = {}
    self.pw = pw
    for f in pw.plots:
        rect = pw.plots[f]._get_best_layout()[1]
        fig = pw.plots[f].figure
        ax = fig.axes[0]
        wcs_ax = WCSAxes(fig, rect, wcs=pw.ds.wcs_2d, frameon=False)
        fig.add_axes(wcs_ax)
        wcs = pw.ds.wcs_2d.wcs
        xax = pw.ds.coordinates.x_axis[pw.data_source.axis]
        yax = pw.ds.coordinates.y_axis[pw.data_source.axis]
        xlabel = "%s (%s)" % (wcs.ctype[xax].split("-")[0], wcs.cunit[xax])
        ylabel = "%s (%s)" % (wcs.ctype[yax].split("-")[0], wcs.cunit[yax])
        fp = pw._font_properties
        wcs_ax.coords[0].set_axislabel(xlabel, fontproperties=fp, minpad=0.5)
        wcs_ax.coords[1].set_axislabel(ylabel, fontproperties=fp, minpad=0.4)
        wcs_ax.coords[0].ticklabels.set_fontproperties(fp)
        wcs_ax.coords[1].ticklabels.set_fontproperties(fp)
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        wcs_ax.set_xlim(pw.xlim[0].value, pw.xlim[1].value)
        wcs_ax.set_ylim(pw.ylim[0].value, pw.ylim[1].value)
        wcs_ax.coords.frame._update_cache = []
        self.plots[f] = fig
def add_volume_weighted_smoothed_field(
    ptype,
    coord_name,
    mass_name,
    smoothing_length_name,
    density_name,
    smoothed_field,
    registry,
    nneighbors=64,
    kernel_name="cubic",
):
    issue_deprecation_warning("This function is deprecated. " + DEP_MSG_SMOOTH_FIELD)
@classmethod
def from_filenames(cls, filenames, parallel=True, setup_function=None, **kwargs):
    r"""Create a time series from either a filename pattern or a list of
    filenames.

    This method provides an easy way to create a
    :class:`~yt.data_objects.time_series.DatasetSeries`, given a set of
    filenames or a pattern that matches them. Additionally, it can set the
    parallelism strategy.

    Parameters
    ----------
    filenames : list or pattern
        This can either be a list of filenames (such as
        ["DD0001/DD0001", "DD0002/DD0002"]) or a pattern to match, such
        as "DD*/DD*.index". If it's the former, they will be loaded in
        order. The latter will be identified with the glob module and
        then sorted.
    parallel : True, False or int
        This parameter governs the behavior when .piter() is called on
        the resultant DatasetSeries object. If this is set to False, the
        time series will not iterate in parallel when .piter() is
        called. If this is set to either True or an integer, it will be
        iterated with 1 or that integer number of processors assigned to
        each parameter file provided to the loop.
    setup_function : callable, accepts a ds
        This function will be called whenever a dataset is loaded.

    Examples
    --------

    >>> def print_time(ds):
    ...     print(ds.current_time)
    ...
    >>> ts = DatasetSeries.from_filenames(
    ...     "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0",
    ...     setup_function=print_time)
    ...
    >>> for ds in ts:
    ...     SlicePlot(ds, "x", "Density").save()

    """
    issue_deprecation_warning(
        "DatasetSeries.from_filenames() is deprecated and will be removed "
        "in a future version of yt. Use DatasetSeries() directly."
    )
    obj = cls(filenames, parallel=parallel, setup_function=setup_function, **kwargs)
    return obj
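# Hedged usage sketch of the replacement named in the warning above. The
# "DD*/DD*.index" pattern is hypothetical and must match datasets on disk
# for either call to succeed.
from yt.data_objects.time_series import DatasetSeries

ts_old = DatasetSeries.from_filenames("DD*/DD*.index")  # deprecated, warns
ts_new = DatasetSeries("DD*/DD*.index")  # preferred spelling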
def _determine_wcs(self):
    self.wcs = _astropy.pywcs.WCS(naxis=2)
    self.events_info = {}
    for k, v in self.primary_header.items():
        if k.startswith("TTYP"):
            if v.lower() in ["x", "y"]:
                num = k.replace("TTYPE", "")
                self.events_info[v.lower()] = (
                    self.primary_header["TLMIN" + num],
                    self.primary_header["TLMAX" + num],
                    self.primary_header["TCTYP" + num],
                    self.primary_header["TCRVL" + num],
                    self.primary_header["TCDLT" + num],
                    self.primary_header["TCRPX" + num],
                )
            elif v.lower() in ["energy", "time"]:
                num = k.replace("TTYPE", "")
                unit = self.primary_header["TUNIT" + num].lower()
                if unit.endswith("ev"):
                    unit = unit.replace("ev", "eV")
                self.events_info[v.lower()] = unit
    self.axis_names = [self.events_info[ax][2] for ax in ["x", "y"]]
    if "reblock" in self.specified_parameters:
        issue_deprecation_warning(
            "'reblock' is now a keyword argument that "
            "can be passed to 'yt.load'. This behavior "
            "is deprecated."
        )
        self.reblock = self.specified_parameters["reblock"]
    self.wcs.wcs.cdelt = [
        self.events_info["x"][4] * self.reblock,
        self.events_info["y"][4] * self.reblock,
    ]
    self.wcs.wcs.crpix = [
        (self.events_info["x"][5] - 0.5) / self.reblock + 0.5,
        (self.events_info["y"][5] - 0.5) / self.reblock + 0.5,
    ]
    self.wcs.wcs.ctype = [self.events_info["x"][2], self.events_info["y"][2]]
    self.wcs.wcs.cunit = ["deg", "deg"]
    self.wcs.wcs.crval = [self.events_info["x"][3], self.events_info["y"][3]]
    self.dims = [
        (self.events_info["x"][1] - self.events_info["x"][0]) / self.reblock,
        (self.events_info["y"][1] - self.events_info["y"][0]) / self.reblock,
    ]
    self.ctypes = self.axis_names
    self.wcs_2d = self.wcs
def to_fits_data(self, fields=None, other_keys=None, length_unit=None, **kwargs):
    r"""Export the fields in this FixedResolutionBuffer instance
    to a FITSImageData instance.

    This will export a set of FITS images of either the fields specified
    or all the fields already in the object.

    Parameters
    ----------
    fields : list of strings
        These fields will be pixelized and output. If "None", the keys
        of the FRB will be used.
    other_keys : dictionary, optional
        A set of header keys and values to write into the FITS header.
    length_unit : string, optional
        the length units that the coordinates are written in. The default
        is to use the default length unit of the dataset.
    """
    from yt.visualization.fits_image import FITSImageData

    if length_unit is None:
        length_unit = self.ds.length_unit
    if "units" in kwargs:
        issue_deprecation_warning(
            "The 'units' keyword argument has been "
            "replaced by the 'length_unit' keyword "
            "argument and the former has been "
            "deprecated. Setting 'length_unit' "
            "to 'units'."
        )
        length_unit = kwargs.pop("units")
    if fields is None:
        fields = list(self.data.keys())
    else:
        fields = ensure_list(fields)
    if len(fields) == 0:
        raise RuntimeError(
            "No fields to export. Either pass a field or list of fields to "
            "to_fits_data or access a field from the FixedResolutionBuffer "
            "object."
        )
    fid = FITSImageData(self, fields=fields, length_unit=length_unit)
    if other_keys is not None:
        for k, v in other_keys.items():
            fid.update_all_headers(k, v)
    return fid
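# Hedged usage sketch for to_fits_data above, chaining into the writeto
# method shown earlier. The dataset path is a yt sample dataset name and
# must exist locally for this to run.
import yt

ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
slc = ds.slice("z", 0.0)
frb = slc.to_frb((500.0, "kpc"), 800)
fid = frb.to_fits_data(fields=["density"], length_unit="kpc")
fid.writeto("slice.fits", overwrite=True)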
def particle_age(field, data):
    # Closure: ptype is bound in the enclosing scope where this deprecated
    # field is registered.
    msg = (
        "The RAMSES particle_age field has been deprecated since "
        "it did not actually represent particle ages in all "
        "cases. To get the time when a particle was formed use "
        "the particle_birth_time field instead. To get the "
        "age of a star particle, use the star_age field."
    )
    if not isinstance(data, FieldDetector):
        issue_deprecation_warning(msg, stacklevel=2)
    if data.ds.cosmological_simulation:
        conformal_age = data[ptype, "conformal_birth_time"]
        ret = convert_ramses_ages(data.ds, conformal_age)
        return data.ds.arr(ret, "code_time")
    else:
        return data[ptype, "particle_birth_time"]
def set_cbar_minorticks(self, field, state):
    """Deprecated alias, kept for backward compatibility.

    Turn colorbar minor ticks "on" or "off" in the current plot,
    according to *state*.

    Parameters
    ----------
    field : string
        the field whose colorbar minorticks should be set
    state : string
        the state indicating 'on' or 'off'
    """
    issue_deprecation_warning(
        "Deprecated alias, use set_colorbar_minorticks instead."
    )
    boolstate = {"on": True, "off": False}[state.lower()]
    return self.set_colorbar_minorticks(field, boolstate)
def _sanitize_sampling_type(sampling_type, particle_type=None):
    """Detect conflicts between deprecated and new parameters to specify
    the sampling type in a new field.

    This is a helper function to add_field methods.

    Parameters
    ----------
    sampling_type : str
        One of "cell", "particle" or "local" (case insensitive)
    particle_type : bool, optional
        This is a deprecated argument of the add_field method,
        which was replaced by sampling_type.

    Raises
    ------
    ValueError
        For unsupported values in sampling_type
    RuntimeError
        If conflicting parameters are passed.
    """
    try:
        sampling_type = sampling_type.lower()
    except AttributeError as e:
        raise TypeError("sampling_type should be a string.") from e

    acceptable_samplings = ("cell", "particle", "local")
    if sampling_type not in acceptable_samplings:
        raise ValueError(
            "Invalid sampling type %s. Valid sampling types are %s"
            % (sampling_type, ", ".join(acceptable_samplings))
        )

    if particle_type:
        issue_deprecation_warning(
            "'particle_type' keyword argument is deprecated in favour "
            "of the positional argument 'sampling_type'."
        )
        if sampling_type != "particle":
            raise RuntimeError(
                "Conflicting values for parameters "
                "'sampling_type' and 'particle_type'."
            )

    return sampling_type
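# Hedged usage sketch of the sanitizer above. The happy paths need nothing
# beyond the function itself; the deprecated particle_type path additionally
# assumes yt's issue_deprecation_warning helper is importable.
assert _sanitize_sampling_type("CELL") == "cell"
assert _sanitize_sampling_type("Particle") == "particle"
# Error paths:
#   _sanitize_sampling_type("vertex")                    -> ValueError
#   _sanitize_sampling_type(42)                          -> TypeError
#   _sanitize_sampling_type("cell", particle_type=True)  -> RuntimeError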
def __call__(self, events, prng=None):
    """
    Calling method for :class:`~pyxsim.instruments.InstrumentSimulator`.

    Parameters
    ----------
    events : :class:`~pyxsim.events.EventList`
        An EventList instance of unconvolved events.
    prng : integer or :class:`~numpy.random.RandomState` object
        A pseudo-random number generator. Typically will only be specified
        if you have a reason to generate the same set of random numbers,
        such as for a test. Default is to use the :mod:`numpy.random`
        module.
    """
    issue_deprecation_warning(
        "The pyXSIM built-in instrument simulators "
        "have been deprecated and will be removed "
        "in a future release!"
    )
    if "pi" in events or "pha" in events:
        raise RuntimeError(
            "These events have already been convolved with a response!"
        )
    prng = parse_prng(prng)
    flux = (
        np.sum(events["eobs"]).to("erg")
        / events.parameters["exp_time"]
        / events.parameters["area"]
    )
    exp_time = events.parameters["exp_time"]
    emin = events["eobs"].min().value
    emax = events["eobs"].max().value
    new_events = {}
    new_events.update(events.events)
    new_events["energy"] = new_events.pop("eobs")
    new_events = self.arf.detect_events(
        new_events, exp_time, flux, [emin, emax], prng=prng
    )
    new_events = self.rmf.scatter_energies(new_events, prng=prng)
    new_events["eobs"] = new_events.pop("energy")
    chantype = self.rmf.header["CHANTYPE"].lower()
    new_events[chantype] = new_events.pop(self.rmf.header["CHANTYPE"])
    parameters = {}
    parameters.update(events.parameters)
    parameters["channel_type"] = chantype
    parameters["mission"] = self.rmf.header.get("MISSION", "")
    parameters["instrument"] = self.rmf.header["INSTRUME"]
    parameters["telescope"] = self.rmf.header["TELESCOP"]
    parameters["arf"] = self.arf.filename
    parameters["rmf"] = self.rmf.filename
    return ConvolvedEventList(new_events, parameters)
def apply_to_stream(self, overwrite=False, **kwargs):
    """
    Apply the particles to a grid-based stream dataset. If particles
    already exist, and overwrite=False, do not overwrite them, but add
    the new ones to them.
    """
    if "clobber" in kwargs:
        issue_deprecation_warning(
            'The "clobber" keyword argument '
            'is deprecated. Use the "overwrite" '
            "argument, which has the same effect, "
            "instead."
        )
        overwrite = kwargs.pop("clobber")
    grid_data = []
    for i, g in enumerate(self.ds.index.grids):
        data = {}
        number_of_particles = self.NumberOfParticles[i]
        if not overwrite:
            number_of_particles += g.NumberOfParticles
        grid_particles = self.get_for_grid(g)
        for field in self.field_list:
            if number_of_particles > 0:
                if (
                    g.NumberOfParticles > 0
                    and not overwrite
                    and field in self.ds.field_list
                ):
                    # We have particles in this grid, we're not
                    # overwriting them, and the field is in the field
                    # list already
                    data[field] = uconcatenate([g[field], grid_particles[field]])
                else:
                    # Otherwise, simply add the field in
                    data[field] = grid_particles[field]
            else:
                # We don't have particles in this grid
                data[field] = np.array([], dtype="float64")
        grid_data.append(data)
    self.ds.index.update_data(grid_data)
def export_fits(
    self,
    filename,
    fields=None,
    overwrite=False,
    other_keys=None,
    length_unit=None,
    **kwargs,
):
    r"""Export a set of pixelized fields to a FITS file.

    This will export a set of FITS images of either the fields specified
    or all the fields already in the object.

    Parameters
    ----------
    filename : string
        The name of the FITS file to be written.
    fields : list of strings
        These fields will be pixelized and output. If "None", the keys
        of the FRB will be used.
    overwrite : boolean
        If the file exists, this governs whether we will overwrite.
    other_keys : dictionary, optional
        A set of header keys and values to write into the FITS header.
    length_unit : string, optional
        the length units that the coordinates are written in. The default
        is to use the default length unit of the dataset.
    """
    issue_deprecation_warning(
        "The 'export_fits' method of "
        "FixedResolutionBuffer is deprecated. "
        "Use the 'to_fits_data' method to create "
        "a FITSImageData instance and then "
        "use its `writeto` method."
    )
    fid = self.to_fits_data(
        fields=fields, other_keys=other_keys, length_unit=length_unit, **kwargs
    )
    fid.writeto(filename, overwrite=overwrite, **kwargs)
def can_run_sim(sim_fn, sim_type, file_check=False):
    issue_deprecation_warning(
        "This function is no longer used in the "
        "yt project testing framework and is "
        "targeted for deprecation."
    )
    result_storage = AnswerTestingTest.result_storage
    if isinstance(sim_fn, SimulationTimeSeries):
        return result_storage is not None
    path = ytcfg.get("yt", "test_data_dir")
    if not os.path.isdir(path):
        return False
    if file_check:
        return (
            os.path.isfile(os.path.join(path, sim_fn))
            and result_storage is not None
        )
    try:
        load_simulation(sim_fn, sim_type)
    except FileNotFoundError:
        if ytcfg.getboolean("yt", "__strict_requires"):
            if result_storage is not None:
                result_storage["tainted"] = True
            raise
        return False
    return result_storage is not None
def set_minorticks(self, field, state):
    """Turn minor ticks on or off in the current plot.

    Displaying minor ticks reduces performance; turn them off using
    set_minorticks('all', False) if drawing speed is a problem.

    Parameters
    ----------
    field : string
        the field to remove minorticks;
        if field == 'all', applies to all plots.
    state : bool
        the state indicating 'on' (True) or 'off' (False)
    """
    if isinstance(state, str):
        from yt.funcs import issue_deprecation_warning

        issue_deprecation_warning("Deprecated API, use bools for *state*.")
        state = {"on": True, "off": False}[state.lower()]
    self._minorticks[field] = state
    return self
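# Minimal self-contained sketch of the string -> bool state shim above;
# MiniPlot is a stand-in class, not yt API, and warnings.warn stands in
# for yt's issue_deprecation_warning.
import warnings


class MiniPlot:
    def __init__(self):
        self._minorticks = {}

    def set_minorticks(self, field, state):
        if isinstance(state, str):
            warnings.warn(
                "Deprecated API, use bools for *state*.", DeprecationWarning
            )
            state = {"on": True, "off": False}[state.lower()]
        self._minorticks[field] = state
        return self


p = MiniPlot().set_minorticks("all", "off")  # deprecated string form, warns
assert p._minorticks["all"] is False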
def write_projection(
    data,
    filename,
    colorbar=True,
    colorbar_label=None,
    title=None,
    vmin=None,
    vmax=None,
    limits=None,
    take_log=True,
    figsize=(8, 6),
    dpi=100,
    cmap_name=None,
    extent=None,
    xlabel=None,
    ylabel=None,
):
    r"""Write a projection or volume rendering to disk with a variety of
    pretty parameters such as limits, title, colorbar, etc.

    write_projection uses the standard matplotlib interface to create the
    figure. N.B. This code only works *after* you have created the
    projection using the standard framework (i.e. the Camera interface or
    off_axis_projection).

    Accepts an NxM sized array representing the projection itself as well
    as the filename to which you will save this figure. Note that the
    final resolution of your image will be a product of dpi/100 * figsize.

    Parameters
    ----------
    data : array_like
        image array as output by off_axis_projection or camera.snapshot()
    filename : string
        the filename where the data will be saved
    colorbar : boolean
        do you want a colorbar generated to the right of the image?
    colorbar_label : string
        the label associated with your colorbar
    title : string
        the label at the top of the figure
    vmin : float or None
        the lower limit of the zaxis (part of matplotlib api)
    vmax : float or None
        the upper limit of the zaxis (part of matplotlib api)
    limits : tuple of floats or None
        deprecated; use vmin and vmax instead
    take_log : boolean
        plot the log of the data array (and take the log of the limits
        if set)?
    figsize : array_like
        width, height in inches of final image
    dpi : int
        final image resolution in pixels / inch
    cmap_name : string
        The name of the colormap.

    Examples
    --------

    >>> image = off_axis_projection(ds, c, L, W, N, "Density", no_ghost=False)
    >>> write_projection(image, 'test.png',
    ...                  colorbar_label="Column Density (cm$^{-2}$)",
    ...                  title="Offaxis Projection", vmin=1e-5, vmax=1e-3,
    ...                  take_log=True)
    """
    if cmap_name is None:
        cmap_name = ytcfg.get("yt", "default_colormap")

    import matplotlib.colors
    import matplotlib.figure

    from ._mpl_imports import FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS

    if limits is not None:
        if vmin is not None or vmax is not None:
            raise ValueError(
                "The `limits` keyword argument is deprecated and can not "
                "be used simultaneously with `vmin` or `vmax`."
            )
        issue_deprecation_warning(
            "The `limits` keyword argument is deprecated and will "
            "be removed in a future version of yt. Use `vmin` and `vmax` instead."
        )
        vmin, vmax = limits

    # If this is rendered as log, then apply now.
    if take_log:
        norm_cls = matplotlib.colors.LogNorm
    else:
        norm_cls = matplotlib.colors.Normalize
    norm = norm_cls(vmin=vmin, vmax=vmax)

    # Create the figure and paint the data on
    fig = matplotlib.figure.Figure(figsize=figsize)
    ax = fig.add_subplot(111)

    cax = ax.imshow(data.to_ndarray(), norm=norm, extent=extent, cmap=cmap_name)

    if title:
        ax.set_title(title)

    if xlabel:
        ax.set_xlabel(xlabel)
    if ylabel:
        ax.set_ylabel(ylabel)

    # Suppress the x and y pixel counts
    if extent is None:
        ax.set_xticks(())
        ax.set_yticks(())

    # Add a color bar and label if requested
    if colorbar:
        cbar = fig.colorbar(cax)
        if colorbar_label:
            cbar.ax.set_ylabel(colorbar_label)

    suffix = get_image_suffix(filename)
    if suffix == "":
        suffix = ".png"
        filename = f"{filename}{suffix}"
    mylog.info("Saving plot %s", filename)
    if suffix == ".pdf":
        canvas = FigureCanvasPdf(fig)
    elif suffix in (".eps", ".ps"):
        canvas = FigureCanvasPS(fig)
    else:
        canvas = FigureCanvasAgg(fig)

    fig.tight_layout()

    canvas.print_figure(filename, dpi=dpi)
    return filename
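# Hedged usage sketch: per the deprecation above, pass vmin/vmax rather than
# limits=(vmin, vmax). This uses a small synthetic image so it runs without
# any simulation data on disk.
import numpy as np
import yt
from yt.units.yt_array import YTArray

image = YTArray(np.random.random((64, 64)) * 1e-4, "cm**-2")
yt.write_projection(
    image,
    "proj.png",
    vmin=1e-5,
    vmax=1e-3,
    take_log=True,
    colorbar_label="Column Density (cm$^{-2}$)",
)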
def __init__(self, events, parameters):
    issue_deprecation_warning(
        "ConvolvedEventList has been "
        "deprecated and will be removed "
        "in a future release!"
    )
    super(ConvolvedEventList, self).__init__(events, parameters)
@property
def variance(self):
    issue_deprecation_warning(
        "profile.variance incorrectly returns the profile standard "
        "deviation and has been deprecated; use "
        "profile.standard_deviation instead."
    )
    return self.standard_deviation
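# Minimal self-contained sketch of the deprecated property-alias pattern
# above; Stats is a stand-in, not yt's profile class, and warnings.warn
# stands in for issue_deprecation_warning.
import warnings

import numpy as np


class Stats:
    def __init__(self, values):
        self.values = np.asarray(values, dtype="float64")

    @property
    def standard_deviation(self):
        return self.values.std()

    @property
    def variance(self):
        # Deprecated alias that (mis)named the standard deviation.
        warnings.warn(
            "variance incorrectly returns the standard deviation; "
            "use standard_deviation instead.",
            DeprecationWarning,
        )
        return self.standard_deviation


s = Stats([1.0, 2.0, 3.0])
assert s.variance == s.standard_deviation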
""" API for yt.analysis_modules.photon_simulator. """ #----------------------------------------------------------------------------- # Copyright (c) 2013, yt Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- from yt.funcs import issue_deprecation_warning issue_deprecation_warning("The photon_simulator module is deprecated. Please use pyXSIM " "(http://hea-www.cfa.harvard.edu/~jzuhone/pyxsim) instead.") from .photon_models import \ PhotonModel, \ ThermalPhotonModel from .photon_simulator import \ PhotonList, \ EventList, \ merge_files, \ convert_old_file from .spectral_models import \ SpectralModel, \ XSpecThermalModel, \ XSpecAbsorbModel, \
def add_xray_emissivity_field(ds, e_min, e_max, redshift=0.0,
                              metallicity=("gas", "metallicity"),
                              table_type="cloudy", data_dir=None,
                              cosmology=None, **kwargs):
    r"""Create X-ray emissivity fields for a given energy range.

    Parameters
    ----------
    e_min : float
        The minimum energy in keV for the energy band.
    e_max : float
        The maximum energy in keV for the energy band.
    redshift : float, optional
        The cosmological redshift of the source of the field. Default: 0.0.
    metallicity : str or tuple of str or float, optional
        Either the name of a metallicity field or a single floating-point
        number specifying a spatially constant metallicity. Must be in
        solar units. If set to None, no metals will be assumed. Default:
        ("gas", "metallicity")
    table_type : string, optional
        The type of emissivity table to be used when creating the fields.
        Options are "cloudy" or "apec". Default: "cloudy"
    data_dir : string, optional
        The location to look for the data table in. If not supplied, the
        file will be looked for in the location of the YT_DEST environment
        variable or in the current working directory.
    cosmology : :class:`~yt.utilities.cosmology.Cosmology`, optional
        If set and redshift > 0.0, this cosmology will be used when
        computing the cosmological dependence of the emission fields. If
        not set, yt's default LCDM cosmology will be used.

    This will create three fields:

    "xray_emissivity_{e_min}_{e_max}_keV" (erg s^-1 cm^-3)
    "xray_luminosity_{e_min}_{e_max}_keV" (erg s^-1)
    "xray_photon_emissivity_{e_min}_{e_max}_keV" (photons s^-1 cm^-3)

    Examples
    --------
    >>> import yt
    >>> ds = yt.load("sloshing_nomag2_hdf5_plt_cnt_0100")
    >>> yt.add_xray_emissivity_field(ds, 0.5, 2)
    >>> p = yt.ProjectionPlot(ds, 'x', "xray_emissivity_0.5_2_keV")
    >>> p.save()
    """
    # The next several if constructs are for backwards-compatibility
    if "constant_metallicity" in kwargs:
        issue_deprecation_warning(
            'The "constant_metallicity" parameter is deprecated. Set '
            'the "metallicity" parameter to a constant float value instead.')
        metallicity = kwargs["constant_metallicity"]
    if "with_metals" in kwargs:
        issue_deprecation_warning(
            'The "with_metals" parameter is deprecated. Use the '
            '"metallicity" parameter to choose a constant or '
            'spatially varying metallicity.')
        if kwargs["with_metals"] and isinstance(metallicity, float):
            raise RuntimeError(
                '"with_metals=True", but you specified a constant metallicity!')
        if not kwargs["with_metals"] and not isinstance(metallicity, float):
            raise RuntimeError(
                '"with_metals=False", but you didn\'t specify '
                'a constant metallicity!')
    if not isinstance(metallicity, float) and metallicity is not None:
        try:
            metallicity = ds._get_field_info(*metallicity)
        except YTFieldNotFound:
            raise RuntimeError(
                "Your dataset does not have a {} field! ".format(metallicity) +
                "Perhaps you should specify a constant metallicity instead?")

    my_si = XrayEmissivityIntegrator(table_type, data_dir=data_dir,
                                     redshift=redshift)

    em_0 = my_si.get_interpolator("primordial", e_min, e_max)
    emp_0 = my_si.get_interpolator("primordial", e_min, e_max, energy=False)
    if metallicity is not None:
        em_Z = my_si.get_interpolator("metals", e_min, e_max)
        emp_Z = my_si.get_interpolator("metals", e_min, e_max, energy=False)

    def _emissivity_field(field, data):
        with np.errstate(all='ignore'):
            dd = {"log_nH": np.log10(data["gas", "H_nuclei_density"]),
                  "log_T": np.log10(data["gas", "temperature"])}

        my_emissivity = np.power(10, em_0(dd))
        if metallicity is not None:
            if isinstance(metallicity, DerivedField):
                my_Z = data[metallicity.name]
            else:
                my_Z = metallicity
            my_emissivity += my_Z * np.power(10, em_Z(dd))

        my_emissivity[np.isnan(my_emissivity)] = 0

        return data["gas", "H_nuclei_density"]**2 * \
            YTArray(my_emissivity, "erg*cm**3/s")

    emiss_name = "xray_emissivity_%s_%s_keV" % (e_min, e_max)
    ds.add_field(("gas", emiss_name), function=_emissivity_field,
                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
                 sampling_type="cell", units="erg/cm**3/s")

    def _luminosity_field(field, data):
        return data[emiss_name] * data["cell_volume"]

    lum_name = "xray_luminosity_%s_%s_keV" % (e_min, e_max)
    ds.add_field(("gas", lum_name), function=_luminosity_field,
                 display_name=r"\rm{L}_{X} (%s-%s keV)" % (e_min, e_max),
                 sampling_type="cell", units="erg/s")

    def _photon_emissivity_field(field, data):
        dd = {"log_nH": np.log10(data["gas", "H_nuclei_density"]),
              "log_T": np.log10(data["gas", "temperature"])}

        my_emissivity = np.power(10, emp_0(dd))
        if metallicity is not None:
            if isinstance(metallicity, DerivedField):
                my_Z = data[metallicity.name]
            else:
                my_Z = metallicity
            my_emissivity += my_Z * np.power(10, emp_Z(dd))

        return data["gas", "H_nuclei_density"]**2 * \
            YTArray(my_emissivity, "photons*cm**3/s")

    phot_name = "xray_photon_emissivity_%s_%s_keV" % (e_min, e_max)
    ds.add_field(("gas", phot_name), function=_photon_emissivity_field,
                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
                 sampling_type="cell", units="photons/cm**3/s")

    fields = [emiss_name, lum_name, phot_name]

    if redshift > 0.0:

        if cosmology is None:
            if hasattr(ds, "cosmology"):
                cosmology = ds.cosmology
            else:
                cosmology = Cosmology()

        D_L = cosmology.luminosity_distance(0.0, redshift)
        angular_scale = 1.0 / cosmology.angular_scale(0.0, redshift)
        dist_fac = 1.0 / (4.0 * np.pi * D_L * D_L * angular_scale * angular_scale)

        ei_name = "xray_intensity_%s_%s_keV" % (e_min, e_max)

        def _intensity_field(field, data):
            I = dist_fac * data[emiss_name]
            return I.in_units("erg/cm**3/s/arcsec**2")

        ds.add_field(("gas", ei_name), function=_intensity_field,
                     display_name=r"I_{X} (%s-%s keV)" % (e_min, e_max),
                     sampling_type="cell", units="erg/cm**3/s/arcsec**2")

        i_name = "xray_photon_intensity_%s_%s_keV" % (e_min, e_max)

        def _photon_intensity_field(field, data):
            I = (1.0 + redshift) * dist_fac * data[phot_name]
            return I.in_units("photons/cm**3/s/arcsec**2")

        ds.add_field(("gas", i_name), function=_photon_intensity_field,
                     display_name=r"I_{X} (%s-%s keV)" % (e_min, e_max),
                     sampling_type="cell", units="photons/cm**3/s/arcsec**2")

        fields += [ei_name, i_name]

    for field in fields:
        mylog.info("Adding %s field.", field)

    return fields
def __init__(
    self,
    data,
    fields=None,
    length_unit=None,
    width=None,
    img_ctr=None,
    wcs=None,
    current_time=None,
    time_unit=None,
    mass_unit=None,
    velocity_unit=None,
    magnetic_unit=None,
    ds=None,
    unit_header=None,
    **kwargs,
):
    r"""Initialize a FITSImageData object.

    FITSImageData contains a collection of FITS ImageHDU instances and
    WCS information, along with units for each of the images.
    FITSImageData instances can be constructed from ImageArrays, NumPy
    arrays, dicts of such arrays, FixedResolutionBuffers, and
    YTCoveringGrids. The latter two are the most powerful because WCS
    information can be constructed automatically from their coordinates.

    Parameters
    ----------
    data : FixedResolutionBuffer, YTCoveringGrid, ImageArray,
        numpy.ndarray, or dict of such arrays
        The data to be made into a FITS image or images.
    fields : single string or list of strings, optional
        The field names for the data. If *fields* is None and *data* has
        keys, it will use these for the fields. If *data* is just a
        single array one field name must be specified.
    length_unit : string
        The units of the WCS coordinates and the length unit of the file.
        Defaults to the length unit of the dataset, if there is one, or
        "cm" if there is not.
    width : float or YTQuantity
        The width of the image. Either a single value or iterable of
        values. If a float, assumed to be in *length_unit*. Only used if
        this information is not already provided by *data*.
    img_ctr : array_like or YTArray
        The center coordinates of the image. If a list or NumPy array,
        it is assumed to be in *length_unit*. Only used if this
        information is not already provided by *data*.
    wcs : `~astropy.wcs.WCS` instance, optional
        Supply an AstroPy WCS instance. Will override automatic WCS
        creation from FixedResolutionBuffers and YTCoveringGrids.
    current_time : float, tuple, or YTQuantity, optional
        The current time of the image(s). If not specified, one will be
        set from the dataset if there is one. If a float, it will be
        assumed to be in *time_unit* units.
    time_unit : string
        The default time units of the file. Defaults to "s".
    mass_unit : string
        The default mass units of the file. Defaults to "g".
    velocity_unit : string
        The default velocity units of the file. Defaults to "cm/s".
    magnetic_unit : string
        The default magnetic units of the file. Defaults to "gauss".
    ds : `~yt.static_output.Dataset` instance, optional
        The dataset associated with the image(s), typically used to
        transfer metadata to the header(s). Does not need to be
        specified if *data* has a dataset as an attribute.

    Examples
    --------
    >>> # This example uses a FRB.
    >>> ds = load("sloshing_nomag2_hdf5_plt_cnt_0150")
    >>> prj = ds.proj(2, "kT", weight_field="density")
    >>> frb = prj.to_frb((0.5, "Mpc"), 800)
    >>> # This example just uses the FRB and puts the coords in kpc.
    >>> f_kpc = FITSImageData(frb, fields="kT", length_unit="kpc",
    ...                       time_unit=(1.0, "Gyr"))
    >>> # This example specifies a specific WCS.
    >>> from astropy.wcs import WCS
    >>> w = WCS(naxis=2)
    >>> w.wcs.crval = [30., 45.]  # RA, Dec in degrees
    >>> w.wcs.cunit = ["deg"]*2
    >>> nx, ny = 800, 800
    >>> w.wcs.crpix = [0.5*(nx+1), 0.5*(ny+1)]
    >>> w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    >>> scale = 1./3600.  # One arcsec per pixel
    >>> w.wcs.cdelt = [-scale, scale]
    >>> f_deg = FITSImageData(frb, fields="kT", wcs=w)
    >>> f_deg.writeto("temp.fits")
    """
    if fields is not None:
        fields = ensure_list(fields)

    if "units" in kwargs:
        issue_deprecation_warning(
            "The 'units' keyword argument has been replaced "
            "by the 'length_unit' keyword argument and the "
            "former has been deprecated. Setting 'length_unit' "
            "to 'units'."
        )
        length_unit = kwargs.pop("units")

    if ds is None:
        ds = getattr(data, "ds", None)

    self.fields = []
    self.field_units = {}

    if unit_header is None:
        self._set_units(
            ds, [length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit]
        )
    else:
        self._set_units_from_header(unit_header)

    wcs_unit = str(self.length_unit.units)

    self._fix_current_time(ds, current_time)

    if width is None:
        width = 1.0
    if isinstance(width, tuple):
        if ds is None:
            width = YTQuantity(width[0], width[1])
        else:
            width = ds.quan(width[0], width[1])
    if img_ctr is None:
        img_ctr = np.zeros(3)

    exclude_fields = [
        "x", "y", "z", "px", "py", "pz", "pdx", "pdy", "pdz", "weight_field",
    ]

    if isinstance(data, _astropy.pyfits.PrimaryHDU):
        data = _astropy.pyfits.HDUList([data])

    if isinstance(data, _astropy.pyfits.HDUList):
        self.hdulist = data
        for hdu in data:
            self.fields.append(hdu.header["btype"])
            self.field_units[hdu.header["btype"]] = hdu.header["bunit"]

        self.shape = self.hdulist[0].shape
        self.dimensionality = len(self.shape)
        wcs_names = [key for key in self.hdulist[0].header if "WCSNAME" in key]
        for name in wcs_names:
            if name == "WCSNAME":
                key = " "
            else:
                key = name[-1]
            w = _astropy.pywcs.WCS(
                header=self.hdulist[0].header, key=key, naxis=self.dimensionality
            )
            setattr(self, "wcs" + key.strip().lower(), w)

        return

    self.hdulist = _astropy.pyfits.HDUList()

    if hasattr(data, "keys"):
        img_data = data
        if fields is None:
            fields = list(img_data.keys())
    elif isinstance(data, np.ndarray):
        if fields is None:
            mylog.warning(
                "No field name given for this array. Calling it 'image_data'."
            )
            fn = "image_data"
            fields = [fn]
        else:
            fn = fields[0]
        img_data = {fn: data}

    for fd in fields:
        if isinstance(fd, tuple):
            self.fields.append(fd[1])
        elif isinstance(fd, DerivedField):
            self.fields.append(fd.name[1])
        else:
            self.fields.append(fd)

    # Sanity checking names
    s = set()
    duplicates = set(f for f in self.fields if f in s or s.add(f))
    if len(duplicates) > 0:
        for i, fd in enumerate(self.fields):
            if fd in duplicates:
                if isinstance(fields[i], tuple):
                    ftype, fname = fields[i]
                elif isinstance(fields[i], DerivedField):
                    ftype, fname = fields[i].name
                else:
                    raise RuntimeError(
                        "Cannot distinguish between fields "
                        "with same name %s!" % fd
                    )
                self.fields[i] = "%s_%s" % (ftype, fname)

    first = True
    for i, name, field in zip(count(), self.fields, fields):
        if name not in exclude_fields:
            this_img = img_data[field]
            if hasattr(img_data[field], "units"):
                if this_img.units.is_code_unit:
                    mylog.warning(
                        "Cannot generate an image with code "
                        "units. Converting to units in CGS."
                    )
                    funits = this_img.units.get_base_equivalent("cgs")
                else:
                    funits = this_img.units
                self.field_units[name] = str(funits)
            else:
                self.field_units[name] = "dimensionless"
            mylog.info("Making a FITS image of field %s", name)
            if isinstance(this_img, ImageArray):
                if i == 0:
                    self.shape = this_img.shape[::-1]
                this_img = np.asarray(this_img)
            else:
                if i == 0:
                    self.shape = this_img.shape
                this_img = np.asarray(this_img.T)
            if first:
                hdu = _astropy.pyfits.PrimaryHDU(this_img)
                first = False
            else:
                hdu = _astropy.pyfits.ImageHDU(this_img)
            hdu.name = name
            hdu.header["btype"] = name
            hdu.header["bunit"] = re.sub("()", "", self.field_units[name])
            for unit in ("length", "time", "mass", "velocity", "magnetic"):
                if unit == "magnetic":
                    short_unit = "bf"
                else:
                    short_unit = unit[0]
                key = "{}unit".format(short_unit)
                value = getattr(self, "{}_unit".format(unit))
                if value is not None:
                    hdu.header[key] = float(value.value)
                    hdu.header.comments[key] = "[%s]" % value.units
            hdu.header["time"] = float(self.current_time.value)
            self.hdulist.append(hdu)

    self.dimensionality = len(self.shape)

    if wcs is None:
        w = _astropy.pywcs.WCS(
            header=self.hdulist[0].header, naxis=self.dimensionality
        )
        # FRBs and covering grids are special cases where
        # we have coordinate information, so we take advantage
        # of this and construct the WCS object
        if isinstance(img_data, FixedResolutionBuffer):
            dx = (img_data.bounds[1] - img_data.bounds[0]).to_value(wcs_unit)
            dy = (img_data.bounds[3] - img_data.bounds[2]).to_value(wcs_unit)
            dx /= self.shape[0]
            dy /= self.shape[1]
            xctr = 0.5 * (img_data.bounds[1] + img_data.bounds[0]).to_value(wcs_unit)
            yctr = 0.5 * (img_data.bounds[3] + img_data.bounds[2]).to_value(wcs_unit)
            center = [xctr, yctr]
            cdelt = [dx, dy]
        elif isinstance(img_data, YTCoveringGrid):
            cdelt = img_data.dds.to_value(wcs_unit)
            center = 0.5 * (img_data.left_edge + img_data.right_edge).to_value(
                wcs_unit
            )
        else:
            # If img_data is just an array we use the width and img_ctr
            # parameters to determine the cell widths
            if not iterable(width):
                width = [width] * self.dimensionality
            if isinstance(width[0], YTQuantity):
                cdelt = [
                    wh.to_value(wcs_unit) / n for wh, n in zip(width, self.shape)
                ]
            else:
                cdelt = [float(wh) / n for wh, n in zip(width, self.shape)]
            center = img_ctr[: self.dimensionality]
        w.wcs.crpix = 0.5 * (np.array(self.shape) + 1)
        w.wcs.crval = center
        w.wcs.cdelt = cdelt
        w.wcs.ctype = ["linear"] * self.dimensionality
        w.wcs.cunit = [wcs_unit] * self.dimensionality
        self.set_wcs(w)
    else:
        self.set_wcs(wcs)
def write_to_gdf(ds, gdf_path, fields=None, data_author=None,
                 data_comment=None, dataset_units=None,
                 particle_type_name="dark_matter", overwrite=False,
                 **kwargs):
    """
    Write a dataset to the given path in the Grid Data Format.

    Parameters
    ----------
    ds : Dataset object
        The yt data to write out.
    gdf_path : string
        The path of the file to output.
    fields
        The field or list of fields to write out. If None, defaults to
        ds.field_list.
    data_author : string, optional
        The name of the author who wrote the data. Default: None.
    data_comment : string, optional
        A descriptive comment. Default: None.
    dataset_units : dictionary, optional
        A dictionary of (value, unit) tuples to set the default units
        of the dataset. Keys can be:

        * "length_unit"
        * "time_unit"
        * "mass_unit"
        * "velocity_unit"
        * "magnetic_unit"

        If not specified, these will carry over from the parent dataset.
    particle_type_name : string, optional
        The particle type of the particles in the dataset.
        Default: "dark_matter"
    overwrite : boolean, optional
        Whether or not to overwrite an already existing file. If False,
        attempting to overwrite an existing file will result in an
        exception.

    Examples
    --------
    >>> dataset_units = {"length_unit": (1.0, "Mpc"),
    ...                  "time_unit": (1.0, "Myr")}
    >>> write_to_gdf(ds, "clumps.h5", data_author="John ZuHone",
    ...              dataset_units=dataset_units,
    ...              data_comment="My Really Cool Dataset", overwrite=True)
    """
    if "clobber" in kwargs:
        issue_deprecation_warning(
            'The "clobber" keyword argument '
            'is deprecated. Use the "overwrite" '
            "argument, which has the same effect, "
            "instead."
        )
        overwrite = kwargs.pop("clobber")

    if fields is None:
        fields = ds.field_list

    fields = ensure_list(fields)

    with _create_new_gdf(ds, gdf_path, data_author, data_comment,
                         dataset_units=dataset_units,
                         particle_type_name=particle_type_name,
                         overwrite=overwrite) as f:
        # now add the fields one-by-one
        _write_fields_to_gdf(ds, f, fields, particle_type_name)
""" API for halo_mass_function """ #----------------------------------------------------------------------------- # Copyright (c) 2013, yt Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- from yt.funcs import issue_deprecation_warning issue_deprecation_warning( "The halo_mass_function module does not function correctly and has been " "deprecated. This code has been moved to the yt attic " "(https://bitbucket.org/yt_analysis/yt_attic) and will be removed in a " "future release.") from .halo_mass_function import \ HaloMassFcn, \ TransferFunction, \ integrate_inf
@contextmanager
def _create_new_gdf(ds, gdf_path, data_author=None, data_comment=None,
                    dataset_units=None, particle_type_name="dark_matter",
                    overwrite=False, **kwargs):
    # Used as "with _create_new_gdf(...) as f:" (see write_to_gdf above),
    # so this generator must be wrapped as a context manager.
    if "clobber" in kwargs:
        issue_deprecation_warning(
            'The "clobber" keyword argument '
            'is deprecated. Use the "overwrite" '
            "argument, which has the same effect, "
            "instead."
        )
        overwrite = kwargs.pop("clobber")

    # Make sure we have the absolute path to the file first
    gdf_path = os.path.abspath(gdf_path)

    # Is the file already there? If so, are we allowing overwriting?
    if os.path.exists(gdf_path) and not overwrite:
        raise YTGDFAlreadyExists(gdf_path)

    ###
    # Create and open the file with h5py. We use parallel
    # h5py if it is available.
    ###
    if communication_system.communicators[-1].size > 1 and \
            h5py.get_config().mpi:
        mpi4py_communicator = communication_system.communicators[-1].comm
        f = h5py.File(gdf_path, mode="w", driver="mpio",
                      comm=mpi4py_communicator)
    else:
        f = h5py.File(gdf_path, mode="w")

    ###
    # "gridded_data_format" group
    ###
    g = f.create_group("gridded_data_format")
    g.attrs["data_software"] = "yt"
    g.attrs["data_software_version"] = yt_version
    if data_author is not None:
        g.attrs["data_author"] = data_author
    if data_comment is not None:
        g.attrs["data_comment"] = data_comment

    ###
    # "simulation_parameters" group
    ###
    g = f.create_group("simulation_parameters")
    g.attrs["refine_by"] = ds.refine_by
    g.attrs["dimensionality"] = ds.dimensionality
    g.attrs["domain_dimensions"] = ds.domain_dimensions
    g.attrs["current_time"] = ds.current_time
    g.attrs["domain_left_edge"] = ds.domain_left_edge
    g.attrs["domain_right_edge"] = ds.domain_right_edge
    g.attrs["unique_identifier"] = ds.unique_identifier
    g.attrs["cosmological_simulation"] = ds.cosmological_simulation
    # @todo: Where is this in the yt API?
    g.attrs["num_ghost_zones"] = 0
    # @todo: Where is this in the yt API?
    g.attrs["field_ordering"] = 0
    # @todo: not yet supported by yt.
    g.attrs["boundary_conditions"] = np.array([0, 0, 0, 0, 0, 0], "int32")

    if ds.cosmological_simulation:
        g.attrs["current_redshift"] = ds.current_redshift
        g.attrs["omega_matter"] = ds.omega_matter
        g.attrs["omega_lambda"] = ds.omega_lambda
        g.attrs["hubble_constant"] = ds.hubble_constant

    if dataset_units is None:
        dataset_units = {}

    g = f.create_group("dataset_units")
    for u in ["length", "time", "mass", "velocity", "magnetic"]:
        unit_name = u + "_unit"
        if unit_name in dataset_units:
            value, units = dataset_units[unit_name]
        else:
            attr = getattr(ds, unit_name)
            value = float(attr)
            units = str(attr.units)
        d = g.create_dataset(unit_name, data=value)
        d.attrs["unit"] = units

    ###
    # "field_types" group
    ###
    g = f.create_group("field_types")

    ###
    # "particle_types" group
    ###
    g = f.create_group("particle_types")

    # @todo: Particle type iterator
    sg = g.create_group(particle_type_name)
    sg["particle_type_name"] = np.string_(particle_type_name)

    ###
    # root datasets -- info about the grids
    ###
    f["grid_dimensions"] = ds.index.grid_dimensions
    f["grid_left_index"] = np.array(
        [grid.get_global_startindex() for grid in ds.index.grids]
    ).reshape(ds.index.grid_dimensions.shape[0], 3)
    f["grid_level"] = ds.index.grid_levels.flat
    # @todo: Fill with proper values
    f["grid_parent_id"] = -np.ones(ds.index.grid_dimensions.shape[0])
    f["grid_particle_count"] = ds.index.grid_particle_count

    ###
    # "data" group -- where we should spend the most time
    ###
    g = f.create_group("data")
    for grid in ds.index.grids:
        # add group for this grid
        grid_group = g.create_group("grid_%010i" % (grid.id - grid._id_offset))
        # add group for the particles on this grid
        particles_group = grid_group.create_group("particles")
        particles_group.create_group(particle_type_name)

    yield f

    # close the file when done
    f.close()
def project_photons(self, normal, sky_center, absorb_model=None,
                    nH=None, no_shifting=False, north_vector=None,
                    sigma_pos=None, kernel="top_hat", prng=None,
                    **kwargs):
    r"""
    Projects photons onto an image plane given a line of sight.
    Returns a new :class:`~pyxsim.event_list.EventList`.

    Parameters
    ----------
    normal : character or array-like
        Normal vector to the plane of projection. If "x", "y", or "z",
        will assume to be along that axis (and will probably be faster).
        Otherwise, should be an off-axis normal vector, e.g.
        [1.0, 2.0, -3.0]
    sky_center : array-like
        Center RA, Dec of the events in degrees.
    absorb_model : string or :class:`~pyxsim.spectral_models.AbsorptionModel`
        A model for foreground galactic absorption, to simulate the
        absorption of events before being detected. This cannot be applied
        here if you already did this step previously in the creation of
        the :class:`~pyxsim.photon_list.PhotonList` instance. Known
        options for strings are "wabs" and "tbabs".
    nH : float, optional
        The foreground column density in units of 10^22 cm^{-2}. Only
        used if absorption is applied.
    no_shifting : boolean, optional
        If set, the photon energies will not be Doppler shifted.
    north_vector : a sequence of floats
        A vector defining the "up" direction. This option sets the
        orientation of the plane of projection. If not set, an arbitrary
        grid-aligned north_vector is chosen. Ignored in the case where a
        particular axis (e.g., "x", "y", or "z") is explicitly specified.
    sigma_pos : float, optional
        Apply a gaussian smoothing operation to the sky positions of the
        events. This may be useful when the binned events appear blocky
        due to their uniform distribution within simulation cells.
        However, this will move the events away from their originating
        position on the sky, and so may distort surface brightness
        profiles and/or spectra. Should probably only be used for
        visualization purposes. Supply a float here to smooth with a
        standard deviation with this fraction of the cell size.
        Default: None
    kernel : string, optional
        The kernel used when smoothing positions of X-rays originating
        from SPH particles, "gaussian" or "top_hat". Default: "top_hat".
    prng : integer or :class:`~numpy.random.RandomState` object
        A pseudo-random number generator. Typically will only be
        specified if you have a reason to generate the same set of random
        numbers, such as for a test. Default is to use the
        :mod:`numpy.random` module.

    Examples
    --------
    >>> L = np.array([0.1, -0.2, 0.3])
    >>> events = my_photons.project_photons(L, [30., 45.])
    """
    prng = parse_prng(prng)

    scale_shift = -1.0 / clight.to("km/s")

    if "smooth_positions" in kwargs:
        issue_deprecation_warning(
            "'smooth_positions' has been renamed to "
            "'sigma_pos' and the former is deprecated!")
        sigma_pos = kwargs["smooth_positions"]

    if "redshift_new" in kwargs or "area_new" in kwargs or \
            "exp_time_new" in kwargs or "dist_new" in kwargs:
        issue_deprecation_warning(
            "Changing the redshift, distance, area, or "
            "exposure time has been deprecated in "
            "project_photons!")

    if sigma_pos is not None and self.parameters["data_type"] == "particles":
        raise RuntimeError(
            "The 'sigma_pos' argument should not be used with "
            "particle-based datasets!")

    if isinstance(absorb_model, string_types):
        if absorb_model not in absorb_models:
            raise KeyError("%s is not a known absorption model!" % absorb_model)
        absorb_model = absorb_models[absorb_model]
    if absorb_model is not None:
        if nH is None:
            raise RuntimeError(
                "You specified an absorption model, but didn't "
                "specify a value for nH!")
        absorb_model = absorb_model(nH)

    sky_center = YTArray(sky_center, "degree")

    n_ph = self.photons["num_photons"]

    if not isinstance(normal, string_types):
        L = np.array(normal)
        orient = Orientation(L, north_vector=north_vector)
        x_hat = orient.unit_vectors[0]
        y_hat = orient.unit_vectors[1]
        z_hat = orient.unit_vectors[2]
    else:
        x_hat = np.zeros(3)
        y_hat = np.zeros(3)
        z_hat = np.zeros(3)

    parameters = {}

    D_A = self.parameters["fid_d_a"]

    events = {}

    eobs = self.photons["energy"].v

    if not no_shifting:
        if comm.rank == 0:
            mylog.info("Doppler-shifting photon energies.")
        if isinstance(normal, string_types):
            shift = self.photons["vel"][:, "xyz".index(normal)] * scale_shift
        else:
            shift = np.dot(self.photons["vel"], z_hat) * scale_shift
        doppler_shift(shift, n_ph, eobs)

    if absorb_model is None:
        det = np.ones(eobs.size, dtype="bool")
        num_det = eobs.size
    else:
        if comm.rank == 0:
            mylog.info("Foreground galactic absorption: using "
                       "the %s model and nH = %g." % (absorb_model._name, nH))
        det = absorb_model.absorb_photons(eobs, prng=prng)
        num_det = det.sum()

    events["eobs"] = YTArray(eobs[det], "keV")

    num_events = comm.mpi_allreduce(num_det)

    if comm.rank == 0:
        mylog.info("%d events have been detected." % num_events)

    if num_det > 0:
        if comm.rank == 0:
            mylog.info("Assigning positions to events.")

        if isinstance(normal, string_types):
            norm = "xyz".index(normal)
        else:
            norm = normal

        xsky, ysky = scatter_events(norm, prng, kernel,
                                    self.parameters["data_type"],
                                    num_det, det,
                                    self.photons["num_photons"],
                                    self.photons["pos"].d,
                                    self.photons["dx"].d, x_hat, y_hat)

        if self.parameters["data_type"] == "cells" and sigma_pos is not None:
            if comm.rank == 0:
                mylog.info("Optionally smoothing sky positions.")
            sigma = sigma_pos * np.repeat(self.photons["dx"].d, n_ph)[det]
            xsky += sigma * prng.normal(loc=0.0, scale=1.0, size=num_det)
            ysky += sigma * prng.normal(loc=0.0, scale=1.0, size=num_det)

        d_a = D_A.to("kpc").v
        xsky /= d_a
        ysky /= d_a

        if comm.rank == 0:
            mylog.info("Converting pixel to sky coordinates.")

        pixel_to_cel(xsky, ysky, sky_center)

    else:
        xsky = []
        ysky = []

    events["xsky"] = YTArray(xsky, "degree")
    events["ysky"] = YTArray(ysky, "degree")

    parameters["exp_time"] = self.parameters["fid_exp_time"]
    parameters["area"] = self.parameters["fid_area"]
    parameters["sky_center"] = sky_center

    return EventList(events, parameters)