Example 1
File: data.py Project: nplee/sherpa
    def apply(self, array):
        """
        Apply this filter to an array

        Parameters
        ----------
        array : array_like
            Array to be filtered

        Returns
        -------
        array_like
            The filtered array.
        """
        if array is None:
            return

        if self.mask is False:
            raise DataErr('notmask')

        if self.mask is not True:
            # mask is neither True nor False, so treat it as a boolean array
            array = numpy.asarray(array)
            if array.shape != self.mask.shape:
                raise DataErr('mismatch', 'mask', 'data array')
            return array[self.mask]

        return array
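A minimal numpy-only sketch of the masking behaviour above (values invented; only the shape check and the boolean indexing are reproduced):

import numpy

mask = numpy.array([True, False, True, False])
array = numpy.asarray([10, 20, 30, 40])

# mirrors the DataErr('mismatch', ...) guard: shapes must agree
assert array.shape == mask.shape
print(array[mask])  # -> [10 30]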
Example 2
    def notice(self, mins, maxes, axislist, ignore=False):

        ignore = bool_cast(ignore)
        if str in [type(min) for min in mins]:
            raise DataErr('typecheck', 'lower bound')
        elif str in [type(max) for max in maxes]:
            raise DataErr('typecheck', 'upper bound')
        elif str in [type(axis) for axis in axislist]:
            raise DataErr('typecheck', 'grid')

        mask = filter_bins(mins, maxes, axislist)

        if mask is None:
            self.mask = not ignore
        elif not ignore:
            if self.mask is True:
                self.mask = mask
            else:
                self.mask |= mask
        else:
            mask = ~mask
            if self.mask is False:
                self.mask = mask
            else:
                self.mask &= mask
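A self-contained sketch of how repeated notice/ignore calls combine masks, mirroring the |= and &= branches above (plain numpy; the inline range test is a stand-in for filter_bins and all values are invented):

import numpy

x = numpy.linspace(1.0, 10.0, 10)

def notice_range(mask, lo, hi, ignore=False):
    new = (x >= lo) & (x <= hi)  # stand-in for filter_bins
    if not ignore:
        # the "self.mask |= mask" branch
        return new if mask is True else (mask | new)
    new = ~new
    # the "self.mask &= mask" branch
    return new if mask is False else (mask & new)

mask = notice_range(True, 2.0, 5.0)        # notice [2, 5]
mask = notice_range(mask, 3.0, 4.0, True)  # then ignore [3, 4]
print(x[mask])  # -> [2. 5.]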
Example 3
 def apply_filter(self, data):
     if data is not None:
         if self.filter is not None:
             if callable(self.filter):
                 data = self.filter(data)
             else:
                 data = data[self.filter]
         elif self.mask is not True:
             if self.mask is False:
                 raise DataErr('notmask')
             data = numpy.asarray(data)
             if data.shape != self.mask.shape:
                 raise DataErr('mismatch', 'mask', 'data array')
             data = data[self.mask]
     return data
Example 4
 def mask(self, val):
     if (val is True) or (val is False):
         self._mask = val
     elif (val is None) or numpy.isscalar(val):
         raise DataErr('ismask')
     else:
         self._mask = numpy.asarray(val, numpy.bool_)
Example 5
    def __init__(self, pha):
        self.pha = pha
        arf, rmf = pha.get_response()
        if arf is None and rmf is None:
            raise DataErr('norsp', pha.name)

        NoNewAttributesAfterInit.__init__(self)
Example 6
    def __call__(self, model, session=None):
        pha = self.pha
        arf, rmf = pha.get_response()

        if isinstance(model, string_types):
            if session is None:
                model = sherpa.astro.ui._session._eval_model_expression(model)
            else:
                model = session._eval_model_expression(model)

        # Automatically add exposure time to source model
        if pha.exposure is not None:
            model = pha.exposure * model
        elif arf is not None and arf.exposure is not None:
            model = arf.exposure * model

        if arf is not None and rmf is not None:
            return RSPModelPHA(arf, rmf, pha, model)

        if arf is not None:
            return ARFModelPHA(arf, pha, model)

        if rmf is not None:
            return RMFModelPHA(rmf, pha, model)

        raise DataErr('norsp', pha.name)
Example 7
def apply_areascal(mdl, pha, instlabel):
    """Apply the AREASCAL conversion.

    This should be done after applying any RMF or ARF.

    Parameters
    ----------
    mdl : array
        The model values, after being passed through the response.
        The assumption is that the output is in channel space. No
        filtering is assumed to have been applied.
    pha : sherpa.astro.data.DataPHA object
        The PHA object containing the AREASCAL column, scalar, or
        None value.
    instlabel : str
        The name of the response (expected to be of the form
        'RMF: filename'). This is only used when the size of mdl
        does not match the AREASCAL vector.

    Returns
    -------
    ans : array
        If AREASCAL is defined then the output is mdl * AREASCAL,
        otherwise it is just the input array (i.e. mdl).
    """

    ascal = pha.areascal
    if ascal is None:
        return mdl

    if numpy.iterable(ascal) and len(ascal) != len(mdl):
        raise DataErr('mismatch', instlabel, 'AREASCAL: {}'.format(pha.name))

    return mdl * ascal
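The conversion itself is elementwise multiplication; a small numpy-only sketch with invented values:

import numpy

mdl = numpy.array([4.0, 8.0, 6.0])

# a scalar AREASCAL scales every channel equally ...
print(mdl * 0.5)  # -> [2. 4. 3.]

# ... while a vector AREASCAL must match the model length,
# which is what the DataErr('mismatch', ...) check enforces
ascal = numpy.array([1.0, 0.5, 0.25])
assert len(ascal) == len(mdl)
print(mdl * ascal)  # -> [4. 4. 1.5]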
Example 8
    def __call__(self, model, session=None):
        pha = self.pha
        # clear out any previous response filter
        pha.notice_response(False)

        if isinstance(model, string_types):
            if session is None:
                model = sherpa.astro.ui._session._eval_model_expression(model)
            else:
                model = session._eval_model_expression(model)

        arf, rmf = pha.get_response()
        err_msg = None

        if arf is None and rmf is None:
            raise DataErr('norsp', pha.name)

        if arf is None:
            err_msg = 'does not have an associated ARF'
        elif pha.exposure is None:
            err_msg = 'does not specify an exposure time'

        if err_msg:
            raise InstrumentErr('baddata', pha.name, err_msg)

        # Currently, the response is NOT noticed using pileup

        # ARF convolution done inside ISIS pileup module
        # on finite grid scale
        model = model.apply(self.pileup_model, pha.exposure, arf.energ_lo,
                            arf.energ_hi, arf.specresp, model)

        if rmf is not None:
            model = PileupRMFModel(rmf, model, pha)
        return model
Example 9
File: data.py Project: nplee/sherpa
 def __init__(self, name, datasets, numcores=1):
     if len(datasets) == 0:
         raise DataErr('zerodatasimulfit', type(self).__name__)
     self.name = name
     self.datasets = tuple(datasets)
     self.numcores = numcores
     NoNewAttributesAfterInit.__init__(self)
Example 10
 def eval_bkg_model(key, bkg):
     bmodel = self.bkgmodels.get(key)
     if bmodel is None:
         raise DataErr('bkgmodel', key)
     # FIXME: we're not using p here (and therefore assuming that the
     # parameter values have already been updated to match the contents
     # of p)
     return bmodel(*args, **kwargs)
Example 11
def make_ideal_rmf(e_min, e_max, offset=1, name='rmf'):
    """A simple in-memory representation of an ideal RMF.

    This RMF represents a 1-to-1 mapping from channel to energy
    bin (i.e. there's no blurring or secondary channels).

    Parameters
    ----------
    e_min, e_max : array
        The energy ranges corresponding to the channels. The units are
        in keV and each bin has energ_hi > energ_lo. The arrays are
        assumed to be ordered, but it is not clear yet whether they
        have to be in ascending order. The sizes must match each
        other. This corresponds to the E_MIN and E_MAX columns of
        the EBOUNDS extension of the RMF file format.
    offset : int, optional
        The value of the first channel (corresponding to the TLMIN
        value of the F_CHAN column of the 'MATRIX' or 'SPECRESP
        MATRIX' block). It is expected to be 0 or 1, but the only
        restriction is that it is 0 or greater.
    name : str, optional
        The name to give to the RMF instance.

    Returns
    -------
    rmf : sherpa.astro.data.DataRMF
        The RMF.

    """

    elo = np.asarray(e_min)
    ehi = np.asarray(e_max)
    if elo.size != ehi.size:
        raise DataErr('mismatch', 'e_min', 'e_max')
    detchans = elo.size

    if offset < 0:
        raise ArgumentErr('bad', 'offset', 'value can not be negative')

    # The "ideal" matrix is the identity matrix, which, in compressed
    # form, is an array of 1.0's (matrix) and an array of locations
    # giving the column where the element is 1 (fchan). It appears
    # that this uses 1 indexing.
    #
    dummy = np.ones(detchans, dtype=np.int16)
    matrix = np.ones(detchans, dtype=np.float32)
    fchan = np.arange(1, detchans + 1, dtype=np.int16)

    return DataRMF(name=name,
                   detchans=detchans,
                   energ_lo=elo,
                   energ_hi=ehi,
                   n_grp=dummy,
                   n_chan=dummy,
                   f_chan=fchan,
                   matrix=matrix,
                   offset=offset)
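A possible call, assuming sherpa is installed and make_ideal_rmf is importable (the 0.1 keV grid is invented for illustration):

import numpy as np

edges = np.linspace(0.5, 1.5, 11)  # ten contiguous 0.1 keV channels
rmf = make_ideal_rmf(edges[:-1], edges[1:], offset=1, name='ideal')
print(rmf.detchans)  # -> 10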
Example 12
def make_arf(energ_lo, energ_hi, specresp=None, exposure=1.0, name='arf'):
    """A simple in-memory representation of an ARF.

    Parameters
    ----------
    energ_lo, energ_hi : array
        The energy grid over which the ARF is defined. The units are
        keV and each bin has energ_hi > energ_lo. The arrays are
        assumed to be ordered, but it is not clear yet whether they
        have to be in ascending order.
    specresp : array or None, optional
        The spectral response (effective area) for each bin, in cm^2.
        If not given then a value of 1.0 per bin is used.
    exposure : number, optional
        The exposure time, in seconds. It must be positive.
    name : str, optional
        The name to give to the ARF instance.

    Returns
    -------
    arf : sherpa.astro.data.DataARF
        The ARF.
    """

    elo = np.asarray(energ_lo)
    ehi = np.asarray(energ_hi)
    if elo.size != ehi.size:
        raise DataErr('mismatch', 'energ_lo', 'energ_hi')

    if specresp is None:
        specresp = np.ones(elo.size, dtype=np.float32)
    else:
        specresp = np.asarray(specresp)
        if specresp.size != elo.size:
            raise DataErr('mismatch', 'energy grid', 'effarea')

    if exposure <= 0.0:
        raise ArgumentErr('bad', 'exposure', 'value must be positive')

    return DataARF(name=name,
                   energ_lo=elo,
                   energ_hi=ehi,
                   specresp=specresp,
                   exposure=exposure)
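A possible call, again assuming sherpa is installed (flat response on an invented grid):

import numpy as np

egrid = np.linspace(0.1, 2.0, 191)  # invented energy grid, keV
arf = make_arf(egrid[:-1], egrid[1:], exposure=5000.0)
print(arf.specresp.sum())  # -> 190.0, since the default response is 1.0 per bin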
Example 13
File: data.py Project: nplee/sherpa
 def mask(self, val):
     if (val is True) or (val is False):
         self._mask = val
     # If val is of type numpy.bool_ and True, it failed the previous test
     # because "is True" compares with the Python "True" singleton.
     # Yet, we do not want to allow arbitrary values that evaluate as True.
     elif val is numpy.ma.nomask:
         self._mask = True
     elif numpy.isscalar(val) and isinstance(val, numpy.bool_):
         self._mask = bool(val)
     elif (val is None) or numpy.isscalar(val):
         raise DataErr('ismask')
     else:
         self._mask = numpy.asarray(val, numpy.bool_)
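The singleton subtlety described in the comment is easy to verify with plain numpy:

import numpy

val = numpy.bool_(True)
print(val is True)        # False: a numpy scalar, not the Python singleton
print(bool(val) is True)  # True after the explicit bool() conversion
print(numpy.isscalar(val) and isinstance(val, numpy.bool_))  # True: that elif fires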
Example 14
    def __call__(self, model):
        pha = self.pha
        arf, rmf = pha.get_response(self.resp_id)

        # Automatically add exposure time to source model
        if pha.exposure is not None:
            model = pha.exposure * model
        elif arf is not None and arf.exposure is not None:
            model = arf.exposure * model

        if arf is not None and rmf is not None:
            return sherpa.astro.instrument.RSPModelPHA(arf, rmf, pha, model)

        if arf is not None:
            return sherpa.astro.instrument.ARFModelPHA(arf, pha, model)

        if rmf is not None:
            return sherpa.astro.instrument.RMFModelPHA(rmf, pha, model)

        raise DataErr('norsp', pha.name)
Example 15
    def __init__(self, source, pha):
        self.channel = pha.channel
        self.mask = numpy.ones(len(pha.channel), dtype=bool)
        self.pha = pha
        self.source = source
        self.elo = None
        self.ehi = None
        self.lo = None
        self.hi = None
        self.table = None
        self.orders = None

        models = []
        grid = []

        for id in pha.response_ids:
            arf, rmf = pha.get_response(id)

            if arf is None and rmf is None:
                raise DataErr('norsp', pha.name)

            m = ResponseNestedModel(arf, rmf)
            indep = None

            if arf is not None:
                indep = arf.get_indep()

            if rmf is not None:
                indep = rmf.get_indep()

            models.append(m)
            grid.append(indep)

        self.models = models
        self.grid = grid

        name = '%s(%s)' % (type(self).__name__, ','.join(
            ['%s(%s)' % (m.name, source.name) for m in models]))
        CompositeModel.__init__(self, name, (source, ))
Example 16
    def to_fit(self, staterrfunc=None):
        total_dep = []
        total_staterror = []
        total_syserror = []

        no_staterror = True
        no_syserror = True

        for data in self.datasets:
            dep, staterror, syserror = data.to_fit(staterrfunc)

            total_dep.append(dep)

            if staterror is not None:
                no_staterror = False
            total_staterror.append(staterror)

            if syserror is not None:
                no_syserror = False
            else:
                syserror = numpy.zeros_like(dep)
            total_syserror.append(syserror)

        total_dep = numpy.concatenate(total_dep)

        if no_staterror:
            total_staterror = None
        elif numpy.any(
            [numpy.equal(array, None).any() for array in total_staterror]):
            raise DataErr('staterrsimulfit')
        else:
            total_staterror = numpy.concatenate(total_staterror)

        if no_syserror:
            total_syserror = None
        else:
            total_syserror = numpy.concatenate(total_syserror)

        return (total_dep, total_staterror, total_syserror)
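A numpy-only sketch of the error check above: if one dataset supplies an error array while another supplies None, the mix is detected and rejected (values invented):

import numpy

total_staterror = [numpy.array([0.1, 0.2]), None]
mixed = numpy.any([numpy.equal(array, None).any()
                   for array in total_staterror])
print(mixed)  # -> True, the case that raises DataErr('staterrsimulfit')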
Example 17
def _pack_pha(dataset):
    """Extract FITS column and header information.

    Notes
    -----
    The PHA Data Extension header page [1]_ lists the following
    keywords as either required or we-really-want-them:

        EXTNAME (= SPECTRUM) - the name (i.e. type) of the extension
        TELESCOP - the "telescope" (i.e. mission/satellite name).
        INSTRUME - the instrument/detector.
        FILTER - the instrument filter in use (if any)
        EXPOSURE - the integration time (in seconds) for the PHA data (assumed to be corrected for deadtime, data drop-outs etc. )
        BACKFILE - the name of the corresponding background file (if any)
        CORRFILE - the name of the corresponding correction file (if any)
        CORRSCAL - the correction scaling factor.
        RESPFILE - the name of the corresponding (default) redistribution matrix file (RMF; see George et al. 1992a).
        ANCRFILE - the name of the corresponding (default) ancillary response file (ARF; see George et al. 1992a).
        HDUCLASS - should contain the string "OGIP" to indicate that this is an OGIP style file.
        HDUCLAS1 - should contain the string "SPECTRUM" to indicate this is a spectrum.
        HDUVERS - the version number of the format (this document describes version 1.2.1)
        POISSERR - whether Poissonian errors are appropriate to the data (see below).
        CHANTYPE - whether the channels used in the file have been corrected in any way (see below).
        DETCHANS - the total number of detector channels available.

    We also add in the following, defaulting to the first value - we
    should do better to support HDUCLAS3=RATE data!

        HDUCLAS2 - indicating the type of data stored.
          Allowed values are:
            'TOTAL' for a gross PHA Spectrum (source + bkgd)
            'NET' for a bkgd-subtracted PHA Spectrum
            'BKG' for a bkgd PHA Spectrum
        HDUCLAS3 - indicating further details of the type of data stored.
          Allowed values are:
            'COUNT' for PHA data stored as counts (rather than count/s)
            'RATE' for PHA data stored in count/s
        HDUCLAS4 - indicating whether this is a type I or II extension.
          Allowed values are:
            'TYPE:I' for type I (single spectrum) data
            'TYPE:II' for type II (multiple spectra) data

    The POISSERR keyword is not required if a STAT_ERR column is
    present; however, it is recommended in this case for clarity. If
    STAT_ERR is to be used for the errors then POISSERR is set to
    false.

    If the CHANNEL array doesn't start at 1 then TLMIN1 and TLMAX1 are
    required (here we assume the CHANNEL column is first) and they are
    strongly recommended otherwise.

    References
    ----------

    .. [1] https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/node6.html

    """

    # The logic here repeats some of the checks that probably should
    # be done by the DataPHA class itself. However, it is likely
    # that we don't want to make the DataPHA class always reject
    # inconsistent state, as this could preclude certain workflows,
    # so we need some validation here.
    #
    if not isinstance(dataset, DataPHA):
        raise IOErr("notpha", dataset.name)

    arf, rmf = dataset.get_response()
    bkg = dataset.get_background()

    # The default keywords; these will be overridden by
    # anything set by the input.
    #
    default_header = {
        "HDUCLASS": "OGIP",
        "HDUCLAS1": "SPECTRUM",
        "HDUCLAS2": "TOTAL",
        "HDUCLAS3": "COUNT",
        "HDUCLAS4": "TYPE:I",
        "HDUVERS": "1.2.1",
        "HDUDOC": "Arnaud et al. 1992a Legacy 2  p 65",

        # Rely on the DataPHA class to have set up TELESCOP/INSTRUME/FILTER
        # based on any associated background or response. If the user has
        # changed them then so be it.
        #
        "TELESCOP": "none",
        "INSTRUME": "none",
        "FILTER": "none",
        "CORRFILE": "none",
        "CORRSCAL": 0,
        "CHANTYPE": "PI",
        "RESPFILE": "none",
        "ANCRFILE": "none",
        "BACKFILE": "none"
    }

    # Header Keys
    header = {}
    if hasattr(dataset, "header"):
        header = dataset.header.copy()

    # Merge the keywords
    #
    header = {**default_header, **header}

    # Over-write the header value (if set)
    header["EXPOSURE"] = getattr(dataset, "exposure", "none")

    _set_keyword(header, "RESPFILE", rmf)
    _set_keyword(header, "ANCRFILE", arf)
    _set_keyword(header, "BACKFILE", bkg)

    # The column ordering for the output file is determined by the
    # order the keys are added to the data dict.
    #
    # TODO: perhaps we should error out if channel or counts is not set?
    #
    data = {}
    data["channel"] = getattr(dataset, "channel", None)
    data["counts"] = getattr(dataset, "counts", None)
    data["stat_err"] = getattr(dataset, "staterror", None)
    data["sys_err"] = getattr(dataset, "syserror", None)
    data["bin_lo"] = getattr(dataset, "bin_lo", None)
    data["bin_hi"] = getattr(dataset, "bin_hi", None)
    data["grouping"] = getattr(dataset, "grouping", None)
    data["quality"] = getattr(dataset, "quality", None)

    def convert_scale_value(colname):
        val = getattr(dataset, colname, None)
        uname = colname.upper()
        if val is None:
            header[uname] = 1.0
            return

        if numpy.isscalar(val):
            header[uname] = val
        else:
            data[colname] = val
            try:
                del header[uname]
            except KeyError:
                pass

    # This over-writes (or deletes) the header
    convert_scale_value("backscal")
    convert_scale_value("areascal")

    # Replace columns where appropriate.
    #
    if data["sys_err"] is None or (data["sys_err"] == 0).all():
        header["SYS_ERR"] = 0.0
        del data["sys_err"]

    if data["quality"] is None or (data["quality"] == 0).all():
        header["QUALITY"] = 0
        del data["quality"]

    if data["grouping"] is None or (data["grouping"] == 1).all():
        header["GROUPING"] = 0
        del data["grouping"]

    # Default to using the STAT_ERR column if set. This is only
    # changed if the user has not set the POISSERR keyword: this
    # keyword is likely to be set for data that has been read in from
    # a file.
    #
    if "POISSERR" not in header:
        header["POISSERR"] = data["stat_err"] is None

    # We are not going to match OGIP standard if there's no data...
    #
    # It's also not clear how to handle the case when the channel
    # range is larger than the channel column. At present we rely in
    # the header being set, which is not ideal. There is also the
    # question of whether we should change all header values if
    # any are missing, or do it on a keyword-by-keyword basis.
    #
    # The assumption here is that "channel" is the first keyword
    # added to the data dictionary.
    #
    if data["channel"] is not None:
        tlmin = data["channel"][0]
        tlmax = data["channel"][-1]

        if "TLMIN1" not in header:
            header["TLMIN1"] = tlmin

        if "TLMAX1" not in header:
            header["TLMAX1"] = tlmax

        if "DETCHANS" not in header:
            header["DETCHANS"] = tlmax - tlmin + 1

    data = {k.upper(): v for (k, v) in data.items() if v is not None}

    # Enforce the column types:
    #   CHANNEL:  Int2 or Int4
    #   COUNTS:   Int2, Int4, or Real4
    #   GROUPING: Int2
    #   QUALITY:  Int2
    #
    # Rather than try to work out whether to use Int2 or Int4
    # just use Int4.
    #
    def convert(column, dtype):
        try:
            vals = data[column]
        except KeyError:
            return

        # assume vals is a numpy array
        if vals.dtype == dtype:
            return

        # Do we warn if we are doing type conversion? For now
        # we don't.
        #
        data[column] = vals.astype(dtype)

    convert("CHANNEL", numpy.int32)
    convert("GROUPING", numpy.int16)
    convert("QUALITY", numpy.int16)

    # COUNTS has to deal with integer or floating-point.
    #
    try:
        vals = data["COUNTS"]
        if numpy.issubdtype(vals.dtype, numpy.integer):
            vals = vals.astype(numpy.int32)
        elif numpy.issubdtype(vals.dtype, numpy.floating):
            vals = vals.astype(numpy.float32)
        else:
            raise DataErr("ogip-error", "PHA dataset", dataset.name,
                          "contains an unsupported COUNTS column")

        data["COUNTS"] = vals

    except KeyError:
        pass

    return data, header
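The header merge relies on later entries winning in a dict literal, so anything the dataset carries overrides the defaults; a plain-Python check:

default_header = {"TELESCOP": "none", "CHANTYPE": "PI"}
header = {"TELESCOP": "CHANDRA"}
print({**default_header, **header})
# -> {'TELESCOP': 'CHANDRA', 'CHANTYPE': 'PI'}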
Example 18
 def _check_shape(self):
     if self.shape is None:
         raise DataErr('shape', self.name)
Example 19
 def __init__(self, name, datasets):
     if len(datasets) == 0:
         raise DataErr('zerodatasimulfit', type(self).__name__)
     datasets = tuple(datasets)
     BaseData.__init__(self)
Example 20
 def _no_dim_error(self):
     raise DataErr('nodim', self.name)
Example 21
 def _no_image_error(self):
     raise DataErr('notimage', self.name)
Example 22
def fake_pha(data,
             model,
             is_source=True,
             pileup_model=None,
             add_bkgs=False,
             bkg_models={},
             id=None):
    """Simulate a PHA data set from a model.

    This function replaces the counts in a PHA dataset with simulated counts
    drawn from a model with Poisson noise. For the simulations, all the details
    already set up in the PHA dataset will be used, including the exposure
    time, one or more ARFs and RMFs, area and background scalings,
    grouping, and data quality arrays.

    Including a background component is optional; if requested, the background
    will be a Poisson draw from the average of all backgrounds that have been
    set for the input `sherpa.astro.data.DataPHA`. For each background
    component, the method can use either the PHA distribution in that
    background component or a model that is evaluated using the response
    set for that background component.
    The latter case avoids adding extra noise from a Poisson draw from a
    distribution that might already have very few counts in the first place.

    The background datasets themselves are not changed by this function. To simulate
    backgrounds as well as the source spectrum, call this function on the
    source PHA dataset and the background PHA dataset(s) independently.

    Parameters
    ----------
    data : sherpa.astro.data.DataPHA
        The dataset (may be a background dataset).
    model : sherpa.models.model.ArithmeticModel instance
        The model that will be used for simulations.
    is_source : bool
        ``True`` means that the ``model`` does not contain response or
        background components and that these need to be added based on the
        ARF, RMF, and backgrounds set up for the data set. If ``False``, then
        the ``model`` contains any components to describe the instrument
        already.
    pileup_model : None or `sherpa.astro.models.JDPileup` instance
        Pileup model for the source spectrum.
    add_bkgs : bool
        If ``True`` backgrounds are added to the source counts.
    bkg_models : dict
        Keys in the dictionary need to be the background ids in the dataset
        ``data``, and the values are the corresponding source models. For all
        background datasets that are listed in this dictionary, the
        background counts will be simulated based on the model, appropriately
        scaled (for area etc.) and added to the source. The same ``is_source``
        setting as for the source model applies here, i.e. if the source model
        already contains the ARF and the RMF, then the background models should
        do so, too. This setting has no effect if ``add_bkgs=False``.

        For all background ids not listed in this dictionary, the
        counts will be drawn from the PHA data of the background data set.
    id : str
        String with the id number if called from the UI layer. This is only used for
        certain error messages.

    Examples
    --------
        Estimate the signal from a 5000 second observation using the
        ARF and RMF from "src.arf" and "src.rmf" respectively:

        >>> set_source(1, xsphabs.gal * xsapec.clus)
        >>> gal.nh = 0.12
        >>> clus.kt, clus.abundanc = 4.5, 0.3
        >>> clus.redshift = 0.187
        >>> clus.norm = 1.2e-3
        >>> fake_pha(1, 'src.arf', 'src.rmf', 5000)

        Simulate a 1 mega second observation for the data and model
        from the default data set. The simulated data will include an
        estimated background component based on scaling the existing
        background observations for the source. The simulated data
        set, which has the same grouping as the default set, for
        easier comparison, is created with the 'sim' label and then
        written out to the file 'sim.pi':

        >>> arf = get_arf()
        >>> rmf = get_rmf()
        >>> bkg = get_bkg()
        >>> bscal = get_backscal()
        >>> grp = get_grouping()
        >>> qual = get_quality()
        >>> texp = 1e6
        >>> set_source('sim', get_source())
        >>> fake_pha('sim', arf, rmf, texp, backscal=bscal, bkg=bkg,
        ...          grouping=grp, quality=qual, grouped=True)
        >>> save_pha('sim', 'sim.pi')

    """
    if len(data.response_ids) == 0:
        raise DataErr('normffake', data.name)

    if is_source:
        model = get_response_for_pha(data,
                                     model,
                                     bkg_srcs={},
                                     pileup_model=pileup_model,
                                     id=id)

    # Get one RMF. Hopefully all of them have the same number of
    # channels, but that sanity check should really be done elsewhere.
    rmf0 = data.get_rmf(data.response_ids[0])
    data.channel = np.arange(1, rmf0.detchans + 1)

    # Calculate the source model, and take a Poisson draw based on
    # the source model.  That becomes the simulated data.
    data.counts = sherpa.utils.poisson_noise(data.eval_model(model))

    # Add in background counts:
    #  -- Scale each background properly given data's
    #     exposure time, BACKSCAL and AREASCAL
    #  -- Take average of scaled backgrounds
    #  -- Take a Poisson draw based on the average scaled background
    #  -- Add that to the simulated data counts
    #
    # Adding background counts is OPTIONAL, only done if user sets
    # "bkg" argument to fake_pha.  The reason is that the user could
    # well set a "source" model that does include a background
    # component.  In that case users should have the option to simulate
    # WITHOUT background counts being added in.
    #
    if add_bkgs:
        nbkg = len(data.background_ids)
        b = 0
        for bkg_id in data.background_ids:
            # we do (probably) want to filter and group the scale array
            scale = data.get_background_scale(bkg_id)
            if bkg_id in bkg_models:
                bkg_pha = data.get_background(bkg_id)
                bkg_model = bkg_models[bkg_id]
                if is_source:
                    bkg_model = get_response_for_pha(bkg_pha, bkg_model, id=id)

                # Exposure in background could differ from exposure in
                # source. But need to set here so eval_model works
                # correctly.
                # (At least I think that's how it works.)
                orig_exposure = bkg_pha.exposure
                bkg_pha.exposure = data.exposure
                # No Poisson here because we make a Poisson draw
                # later using the average of all backgrounds
                cts = bkg_pha.eval_model(bkg_model)
                bkg_pha.exposure = orig_exposure
            else:
                cts = data.get_background(bkg_id).counts
            b += scale * cts

        if nbkg > 0:
            b = b / nbkg
            b_poisson = sherpa.utils.poisson_noise(b)
            data.counts = data.counts + b_poisson
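The heart of the simulation is a per-channel Poisson draw from the predicted counts; a minimal stand-in for sherpa.utils.poisson_noise using numpy's random generator (model values invented):

import numpy as np

rng = np.random.default_rng(42)
predicted = np.array([0.5, 2.0, 10.0, 100.0])  # model counts per channel
simulated = rng.poisson(predicted).astype(float)
print(simulated)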
Example 23
def calc_kcorr(data, model, z, obslo, obshi, restlo=None, resthi=None):
    """Calculate the K correction for a model.

    The K correction ([1]_, [2]_, [3]_, [4]_) is the numeric
    factor applied to measured energy fluxes to convert values in
    an observed energy band so that they are in a rest-frame
    energy band (that is, correct for the change in spectral shape
    between the rest-frame and observed-frame bands). This is
    often used when converting a flux into a luminosity.

    Parameters
    ----------
    data
       The data object to use.
    model
       The source expression: this should not include any instrument
       responses.
    z : number or array, >= 0
       The redshift, or redshifts, of the source.
    obslo : number
       The minimum energy of the observed band.
    obshi : number
       The maximum energy of the observed band, which must
       be larger than `obslo`.
    restlo : number or ``None``
       The minimum energy of the rest-frame band. If ``None`` then
       use `obslo`.
    resthi : number or ``None``
       The maximum energy of the rest-frame band. It must be
       larger than `restlo`. If ``None`` then use `obshi`.

    Returns
    -------
    kz : number or array of numbers

    Notes
    -----
    This is only defined when the analysis is in 'energy' units.

    If the model contains a redshift parameter then it should
    be set to ``0``, rather than the source redshift.

    If the source model is at zero redshift, the observed energy
    band is olo to ohi, and the rest frame band is rlo to rhi
    (which need not match the observed band), then the K
    correction at a redshift z can be calculated as::

      frest = calc_energy_flux(data, model, rlo, rhi)
      fobs  = calc_energy_flux(data, model, olo*(1+z), ohi*(1+z))
      kz    = frest / fobs

    The energy ranges used - rlo to rhi and olo*(1+z) to ohi*(1+z)
    - should be fully covered by the data grid, otherwise the flux
    calculation will be truncated at the grid boundaries, leading
    to incorrect results.

    References
    ----------

    .. [1] "The K correction", Hogg, D.W., et al.
           http://arxiv.org/abs/astro-ph/0210394

    .. [2] Appendix B of Jones et al. 1998, ApJ, vol 495,
           p. 100-114.
           http://adsabs.harvard.edu/abs/1998ApJ...495..100J

    .. [3] "K and evolutionary corrections from UV to IR",
           Poggianti, B.M., A&AS, 1997, vol 122, p. 399-407.
           http://adsabs.harvard.edu/abs/1997A%26AS..122..399P

    .. [4] "Galactic evolution and cosmology - Probing the
           cosmological deceleration parameter", Yoshii, Y. &
           Takahara, F., ApJ, 1988, vol 326, p. 1-18.
           http://adsabs.harvard.edu/abs/1988ApJ...326....1Y

    """

    if restlo is None:
        restlo = obslo
    if resthi is None:
        resthi = obshi

    if numpy.isscalar(z):
        z = numpy.array([z], dtype=float)
    else:
        z = numpy.asarray(z)

    if numpy.any(z < 0):
        raise IOErr('z<=0')

    if obslo <= 0 or restlo <= 0 or obshi <= obslo or resthi <= restlo:
        raise IOErr('erange')

    if hasattr(data, 'get_response'):
        arf, rmf = data.get_response()
        elo = data.bin_lo
        ehi = data.bin_hi
        if arf is not None:
            elo = arf.energ_lo
            ehi = arf.energ_hi
        elif rmf is not None:
            elo = rmf.energ_lo
            ehi = rmf.energ_hi
    else:
        elo, ehi = data.get_indep()

    if elo is None or ehi is None:
        raise DataErr('noenergybins', data.name)

    emin = elo[0]
    emax = ehi[-1]

    if restlo < emin or resthi > emax:
        raise IOErr('energoverlap', emin, emax, 'rest-frame',
                    restlo, resthi, '')

    if obslo * (1.0 + z.min()) < emin:
        raise IOErr('energoverlap', emin, emax, 'observed-frame',
                    obslo, obshi, "at a redshift of %f" % z.min())

    if obshi * (1.0 + z.max()) > emax:
        raise IOErr('energoverlap', emin, emax, 'observed-frame',
                    obslo, obshi, "at a redshift of %f" % z.max())

    zplus1 = z + 1.0
    flux_rest = _flux(data, restlo, resthi, model, eflux=True)
    obs = numpy.asarray([_flux(data, obslo * zz, obshi * zz, model, eflux=True)
                         for zz in zplus1], dtype=float)
    kcorr = flux_rest / obs

    if len(kcorr) == 1:
        return kcorr[0]

    return kcorr
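The frest/fobs recipe from the docstring can be sanity-checked on a toy spectrum with a known answer. For S(E) = E**-1 the band flux depends only on the hi/lo ratio, so the K correction should come out as 1 (numpy-only sketch; spectrum and bands invented):

import numpy as np

def eflux(lo, hi, n=10001):
    # trapezoidal energy flux of the toy spectrum S(E) = E**-1
    e = np.linspace(lo, hi, n)
    y = 1.0 / e
    return np.sum(0.5 * (y[1:] + y[:-1]) * np.diff(e))

z = 0.5
obslo, obshi = 0.5, 2.0
frest = eflux(obslo, obshi)                     # rest band = observed band
fobs = eflux(obslo * (1 + z), obshi * (1 + z))
print(frest / fobs)  # -> ~1.0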
Example 24
 def _wrong_dim_error(self, baddim):
     raise DataErr('wrongdim', self.name, baddim)