Example #1
def get_rmf_data(arg, make_copy=False):
    """
    arg is a filename or an HDUList object.
    """

    rmf, filename = _get_file_contents(arg,
                                       exptype="BinTableHDU",
                                       nobinary=True)

    try:
        if _has_hdu(rmf, 'MATRIX'):
            hdu = rmf['MATRIX']
        elif _has_hdu(rmf, 'SPECRESP MATRIX'):
            hdu = rmf['SPECRESP MATRIX']
        elif _has_hdu(rmf, 'AXAF_RMF'):
            hdu = rmf['AXAF_RMF']
        elif _is_ogip_type(rmf, 'RESPONSE', bltype2='RSP_MATRIX'):
            hdu = rmf[1]
        else:
            raise IOErr('notrsp', filename, 'an RMF')

        data = {}

        data['detchans'] = SherpaUInt(_require_key(hdu, 'DETCHANS'))
        data['energ_lo'] = _require_col(hdu, 'ENERG_LO', fix_type=True)
        data['energ_hi'] = _require_col(hdu, 'ENERG_HI', fix_type=True)
        data['n_grp'] = _require_col(hdu,
                                     'N_GRP',
                                     fix_type=True,
                                     dtype=SherpaUInt)
        data['f_chan'] = _require_vec(hdu,
                                      'F_CHAN',
                                      fix_type=True,
                                      dtype=SherpaUInt)
        data['n_chan'] = _require_vec(hdu,
                                      'N_CHAN',
                                      fix_type=True,
                                      dtype=SherpaUInt)
        # Read MATRIX as-is -- we will flatten it below, because
        # we need to remove all rows corresponding to n_grp[row] == 0
        data['matrix'] = None
        if 'MATRIX' in hdu.columns.names:
            data['matrix'] = hdu.data.field('MATRIX')

        data['header'] = _get_meta_data(hdu)
        data['header'].pop('DETCHANS')

        # Beginning of non-Chandra RMF support
        fchan_col = list(hdu.columns.names).index('F_CHAN') + 1
        tlmin = _try_key(hdu, 'TLMIN' + str(fchan_col), True, SherpaUInt)

        if tlmin is not None:
            data['offset'] = tlmin
        else:
            # QUS: should this actually be an error, rather than just
            #      something that is logged to screen?
            error("Failed to locate TLMIN keyword for F_CHAN" +
                  " column in RMF file '%s'; " % filename +
                  'Update the offset value in the RMF data set to' +
                  ' appropriate TLMIN value prior to fitting')

        if _has_hdu(rmf, 'EBOUNDS'):
            hdu = rmf['EBOUNDS']
            data['e_min'] = _try_col(hdu, 'E_MIN', fix_type=True)
            data['e_max'] = _try_col(hdu, 'E_MAX', fix_type=True)

            # Beginning of non-Chandra RMF support
            chan_col = list(hdu.columns.names).index('CHANNEL') + 1
            tlmin = _try_key(hdu, 'TLMIN' + str(chan_col), True, SherpaUInt)
            if tlmin is not None:
                data['offset'] = tlmin

        else:
            data['e_min'] = None
            data['e_max'] = None
    finally:
        rmf.close()

    # ## For every row i of the response matrix, such that
    # ## n_grp[i] == 0, we need to remove that row from the
    # ## n_chan, f_chan, and matrix arrays we are constructing
    # ## to be passed up to the DataRMF data structure.

    # ## This is trivial for n_chan and f_chan.  For the matrix
    # ## array this can be more work -- can't just remove all
    # ## zeroes, because some rows where n_grp[row] > 0 might
    # ## still have zeroes in the matrix.  I add new code first
    # ## to deal with the matrix, then simpler code to remove zeroes
    # ## from n_chan and f_chan.

    # Read in MATRIX column with structure as-is -- i.e., as an array of
    # arrays.  Then flatten it, but include only those arrays that come from
    # rows where n_grp[row] > 0.  Zero elements can only be included from
    # rows where n_grp[row] > 0.  SMD 05/23/13

    if not isinstance(data['matrix'], _VLF):
        data['matrix'] = None
        raise IOErr('badfile', filename,
                    " MATRIX column not a variable length field")

    good = (data['n_grp'] > 0)
    data['matrix'] = data['matrix'][good]
    data['matrix'] = numpy.concatenate(
        [numpy.asarray(row) for row in data['matrix']])
    data['matrix'] = data['matrix'].astype(SherpaFloat)

    # Flatten the f_chan and n_chan vectors into 1D arrays, group by
    # group, matching the layout the crates backend produces.
    if data['f_chan'].ndim > 1 and data['n_chan'].ndim > 1:
        f_chan = []
        n_chan = []
        for grp, fch, nch in zip(data['n_grp'], data['f_chan'],
                                 data['n_chan']):
            for i in range(grp):
                f_chan.append(fch[i])
                n_chan.append(nch[i])

        data['f_chan'] = numpy.asarray(f_chan, SherpaUInt)
        data['n_chan'] = numpy.asarray(n_chan, SherpaUInt)
    else:
        if len(data['n_grp']) == len(data['f_chan']):
            # filter out groups with zeroes.
            good = (data['n_grp'] > 0)
            data['f_chan'] = data['f_chan'][good]
            data['n_chan'] = data['n_chan'][good]

    return data, filename
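
A minimal standalone sketch of the flattening step above, using made-up n_grp/f_chan/n_chan/matrix values rather than a real RMF (all the data here is invented for illustration):

import numpy

n_grp = numpy.array([2, 0, 1])              # groups per energy row
f_chan = [[1, 5], [], [3]]                  # first channel of each group
n_chan = [[2, 1], [], [2]]                  # channels in each group
matrix = [numpy.array([0.7, 0.2, 0.1]),     # row 0: two groups
          numpy.array([]),                  # row 1: n_grp == 0, dropped
          numpy.array([0.6, 0.4])]          # row 2: one group

# Drop rows with n_grp == 0, then flatten what is left.
good = n_grp > 0
flat_matrix = numpy.concatenate(
    [numpy.asarray(row) for row, keep in zip(matrix, good) if keep])

# Flatten f_chan/n_chan group by group, as the loop above does.
flat_f, flat_n = [], []
for grp, fch, nch in zip(n_grp, f_chan, n_chan):
    for i in range(grp):
        flat_f.append(fch[i])
        flat_n.append(nch[i])

print(flat_matrix)         # [0.7 0.2 0.1 0.6 0.4]
print(flat_f, flat_n)      # [1, 5, 3] [2, 1, 2]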
Example #2
def _pack_pha(dataset):
    """Extract FITS column and header information.

    Notes
    -----
    The PHA Data Extension header page [1]_ lists the following
    keywords as either required or we-really-want-them:

        EXTNAME (= SPECTRUM) - the name (i.e. type) of the extension
        TELESCOP - the "telescope" (i.e. mission/satellite name).
        INSTRUME - the instrument/detector.
        FILTER - the instrument filter in use (if any)
        EXPOSURE - the integration time (in seconds) for the PHA data (assumed to be corrected for deadtime, data drop-outs etc. )
        BACKFILE - the name of the corresponding background file (if any)
        CORRFILE - the name of the corresponding correction file (if any)
        CORRSCAL - the correction scaling factor.
        RESPFILE - the name of the corresponding (default) redistribution matrix file (RMF; see George et al. 1992a).
        ANCRFILE - the name of the corresponding (default) ancillary response file (ARF; see George et al. 1992a).
        HDUCLASS - should contain the string "OGIP" to indicate that this is an OGIP style file.
        HDUCLAS1 - should contain the string "SPECTRUM" to indicate this is a spectrum.
        HDUVERS - the version number of the format (this document describes version 1.2.1)
        POISSERR - whether Poissonian errors are appropriate to the data (see below).
        CHANTYPE - whether the channels used in the file have been corrected in any way (see below).
        DETCHANS - the total number of detector channels available.

    We also add in the following, defaulting to the first value - we
    should do better to support HDUCLAS3=RATE data!

        HDUCLAS2 - indicating the type of data stored.
          Allowed values are:
            'TOTAL' for a gross PHA Spectrum (source + bkgd)
            'NET' for a bkgd-subtracted PHA Spectrum
            'BKG' for a bkgd PHA Spectrum
        HDUCLAS3 - indicating further details of the type of data stored.
          Allowed values are:
            'COUNT' for PHA data stored as counts (rather than count/s)
            'RATE' for PHA data stored in count/s
        HDUCLAS4 - indicating whether this is a type I or II extension.
          Allowed values are:
            'TYPE:I' for type I (single spectrum) data
            'TYPE:II' for type II (multiple spectra) data

    The POISSERR keyword is not required if a STAT_ERR column is
    present; however, it is recommended in this case for clarity. If
    STAT_ERR is to be used for the errors then POISSERR is set to
    false.

    If the CHANNEL array doesn't start at 1 then TLMIN1 and TLMAX1 are
    required (here we assume the CHANNEL column is first) and they are
    strongly recommended otherwise.

    References
    ----------

    .. [1] https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/node6.html

    """

    # The logic here repeats some of the checks that probably should
    # be done by the DataPHA class itself. However, it is likely
    # that we don't want to make the DataPHA class always reject
    # inconsistent state, as this could preclude certain workflows,
    # so we need some validation here.
    #
    if not isinstance(dataset, DataPHA):
        raise IOErr("notpha", dataset.name)

    arf, rmf = dataset.get_response()
    bkg = dataset.get_background()

    # The default keywords; these will be overridden by
    # anything set by the input.
    #
    default_header = {
        "HDUCLASS": "OGIP",
        "HDUCLAS1": "SPECTRUM",
        "HDUCLAS2": "TOTAL",
        "HDUCLAS3": "COUNT",
        "HDUCLAS4": "TYPE:I",
        "HDUVERS": "1.2.1",
        "HDUDOC": "Arnaud et al. 1992a Legacy 2  p 65",

        # Rely on the DataPHA class to have set up TELESCOP/INSTRUME/FILTER
        # based on any associated background or response. If the user has
        # changed them then so be it.
        #
        "TELESCOP": "none",
        "INSTRUME": "none",
        "FILTER": "none",
        "CORRFILE": "none",
        "CORRSCAL": 0,
        "CHANTYPE": "PI",
        "RESPFILE": "none",
        "ANCRFILE": "none",
        "BACKFILE": "none"
    }

    # Header Keys
    header = {}
    if hasattr(dataset, "header"):
        header = dataset.header.copy()

    # Merge the keywords
    #
    header = {**default_header, **header}

    # Over-write the header value (if set)
    header["EXPOSURE"] = getattr(dataset, "exposure", "none")

    _set_keyword(header, "RESPFILE", rmf)
    _set_keyword(header, "ANCRFILE", arf)
    _set_keyword(header, "BACKFILE", bkg)

    # The column ordering for the output file is determined by the
    # order the keys are added to the data dict.
    #
    # TODO: perhaps we should error out if channel or counts is not set?
    #
    data = {}
    data["channel"] = getattr(dataset, "channel", None)
    data["counts"] = getattr(dataset, "counts", None)
    data["stat_err"] = getattr(dataset, "staterror", None)
    data["sys_err"] = getattr(dataset, "syserror", None)
    data["bin_lo"] = getattr(dataset, "bin_lo", None)
    data["bin_hi"] = getattr(dataset, "bin_hi", None)
    data["grouping"] = getattr(dataset, "grouping", None)
    data["quality"] = getattr(dataset, "quality", None)

    def convert_scale_value(colname):
        val = getattr(dataset, colname, None)
        uname = colname.upper()
        if val is None:
            header[uname] = 1.0
            return

        if numpy.isscalar(val):
            header[uname] = val
        else:
            data[colname] = val
            try:
                del header[uname]
            except KeyError:
                pass

    # This over-writes (or deletes) the header
    convert_scale_value("backscal")
    convert_scale_value("areascal")

    # Replace columns where appropriate.
    #
    if data["sys_err"] is None or (data["sys_err"] == 0).all():
        header["SYS_ERR"] = 0.0
        del data["sys_err"]

    if data["quality"] is None or (data["quality"] == 0).all():
        header["QUALITY"] = 0
        del data["quality"]

    if data["grouping"] is None or (data["grouping"] == 1).all():
        header["GROUPING"] = 0
        del data["grouping"]

    # Default to using the STAT_ERR column if set. This is only
    # changed if the user has not set the POISSERR keyword: this
    # keyword is likely to be set for data that has been read in from
    # a file.
    #
    if "POISSERR" not in header:
        header["POISSERR"] = data["stat_err"] is None

    # We are not going to match OGIP standard if there's no data...
    #
    # It's also not clear how to handle the case when the channel
    # range is larger than the channel column. At present we rely on
    # the header being set, which is not ideal. There is also the
    # question of whether we should change all header values if
    # any are missing, or do it on a keyword-by-keyword basis.
    #
    # The assumption here is that "channel" is the first keyword
    # added to the data dictionary.
    #
    if data["channel"] is not None:
        tlmin = data["channel"][0]
        tlmax = data["channel"][-1]

        if "TLMIN1" not in header:
            header["TLMIN1"] = tlmin

        if "TLMAX1" not in header:
            header["TLMAX1"] = tlmax

        if "DETCHANS" not in header:
            header["DETCHANS"] = tlmax - tlmin + 1

    data = {k.upper(): v for (k, v) in data.items() if v is not None}

    # Enforce the column types:
    #   CHANNEL:  Int2 or Int4
    #   COUNTS:   Int2, Int4, or Real4
    #   GROUPING: Int2
    #   QUALITY:  Int2
    #
    # Rather than try to work out whether to use Int2 or Int4
    # just use Int4.
    #
    def convert(column, dtype):
        try:
            vals = data[column]
        except KeyError:
            return

        # assume vals is a numpy array
        if vals.dtype == dtype:
            return

        # Do we warn if we are doing unit conversion? For now
        # we don't.
        #
        data[column] = vals.astype(dtype)

    convert("CHANNEL", numpy.int32)
    convert("GROUPING", numpy.int16)
    convert("QUALITY", numpy.int16)

    # COUNTS has to deal with integer or floating-point.
    #
    try:
        vals = data["COUNTS"]
        if numpy.issubdtype(vals.dtype, numpy.integer):
            vals = vals.astype(numpy.int32)
        elif numpy.issubdtype(vals.dtype, numpy.floating):
            vals = vals.astype(numpy.float32)
        else:
            raise DataErr("ogip-error", "PHA dataset", dataset.name,
                          "contains an unsupported COUNTS column")

        data["COUNTS"] = vals

    except KeyError:
        pass

    return data, header
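
The `{**default_header, **header}` merge above keeps any keyword the dataset already provides and falls back to the defaults otherwise; a quick illustration with invented keyword values:

default_header = {"TELESCOP": "none", "CHANTYPE": "PI", "HDUVERS": "1.2.1"}
header = {"TELESCOP": "CHANDRA", "OBJECT": "3C 273"}   # from the dataset

merged = {**default_header, **header}                  # dataset values win
print(merged)
# {'TELESCOP': 'CHANDRA', 'CHANTYPE': 'PI', 'HDUVERS': '1.2.1', 'OBJECT': '3C 273'}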
Example #3
def get_image_data(arg, make_copy=False):
    """
    get_image_data( filename [, make_copy=False ])

    get_image_data( [PrimaryHDU] [, make_copy=False ])
    """
    filename = ''
    if type(arg) == str and is_binary_file(arg):
        hdu = pyfits.open(arg)
        filename = arg
    elif (type(arg) is pyfits.HDUList and len(arg) > 0 and
          arg[0].__class__ is pyfits.PrimaryHDU):
        hdu = arg
        filename = hdu[0]._file.name
    else:
        raise IOErr('badfile', arg,
                    "a binary FITS file or a PyFITS.PrimaryHDU list")

#   FITS uses logical-to-world where we use physical-to-world.
#   For all transforms, update their physical-to-world
#   values from their logical-to-world values.
#   Find the matching physical transform
#      (same axis no, but sub = 'P' )
#   and use it for the update.
#   Physical tfms themselves do not get updated.
#
#  Fill the physical-to-world transform given the
#  logical-to-world and the associated logical-to-physical.
#      W = wv + wd * ( P - wp )
#      P = pv + pd * ( L - pp )
#      W = lv + ld * ( L - lp )
# Then
#      L = pp + ( P - pv ) / pd
# so   W = lv + ld * ( pp + (P-pv)/pd - lp )
#        = lv + ( ld / pd ) * ( P - [ pv +  (lp-pp)*pd ] )
# Hence
#      wv = lv
#      wd = ld / pd
#      wp = pv + ( lp - pp ) * pd

#  EG suppose phys-to-world is
#         W =  1000 + 2.0 * ( P - 4.0 )
#  and we bin and scale to generate a logical-to-phys of
#         P =  20 + 4.0 * ( L - 10 )
#  Then the combined logical-to-world is
#         W = 1000 + 2.0 * ( (20 - 4) - 4.0 * 10 ) + (2.0 * 4.0) * L
#           = 952 + 8.0 * L  =  1000 + 8.0 * ( L - 6 )
#

    try:
        data = {}

        img = hdu[0]
        if hdu[0].data is None:
            img = hdu[1]
            if hdu[1].data is None:
                raise IOErr('badimg', '')

        data['y'] = numpy.asarray(img.data)

        cdeltp = _get_wcs_key(img, 'CDELT1P', 'CDELT2P')
        crpixp = _get_wcs_key(img, 'CRPIX1P', 'CRPIX2P')
        crvalp = _get_wcs_key(img, 'CRVAL1P', 'CRVAL2P')
        cdeltw = _get_wcs_key(img, 'CDELT1', 'CDELT2')
        crpixw = _get_wcs_key(img, 'CRPIX1', 'CRPIX2')
        crvalw = _get_wcs_key(img, 'CRVAL1', 'CRVAL2')

        # proper calculation of cdelt wrt PHYSICAL coords
        if cdeltw != () and cdeltp != ():
            cdeltw = cdeltw / cdeltp

        # proper calculation of crpix wrt PHYSICAL coords
        if (crpixw != () and crvalp != () and
                cdeltp != () and crpixp != ()):
            crpixw = crvalp + (crpixw - crpixp) * cdeltp

        sky = None
        if transformstatus and cdeltp != () and crpixp != () and crvalp != ():
            sky = WCS('physical', 'LINEAR', crvalp, crpixp, cdeltp)

        eqpos = None
        if transformstatus and cdeltw != () and crpixw != () and crvalw != ():
            eqpos = WCS('world', 'WCS', crvalw, crpixw, cdeltw)

        data['sky'] = sky
        data['eqpos'] = eqpos
        data['header'] = _get_meta_data(img)

        keys = ['MTYPE1', 'MFORM1', 'CTYPE1P', 'CTYPE2P', 'WCSNAMEP',
                'CDELT1P', 'CDELT2P', 'CRPIX1P', 'CRPIX2P', 'CRVAL1P',
                'CRVAL2P', 'MTYPE2', 'MFORM2', 'CTYPE1', 'CTYPE2', 'CDELT1',
                'CDELT2', 'CRPIX1', 'CRPIX2', 'CRVAL1', 'CRVAL2', 'CUNIT1',
                'CUNIT2', 'EQUINOX']

        for key in keys:
            try:
                data['header'].pop(key)
            except KeyError:
                pass

    finally:
        hdu.close()

    return data, filename
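
The algebra in the comment block (wv = lv, wd = ld / pd, wp = pv + (lp - pp) * pd) can be checked numerically; a standalone sketch using the invented numbers from the worked example above:

import numpy

# Logical-to-world (CRVAL/CRPIX/CDELT):       W = lv + ld * (L - lp)
# Logical-to-physical (CRVAL1P/CRPIX1P/...):  P = pv + pd * (L - pp)
lv, ld, lp = 1000.0, 8.0, 6.0
pv, pd, pp = 20.0, 4.0, 10.0

# Derived physical-to-world, as in the comment block.
wv = lv
wd = ld / pd                      # 2.0
wp = pv + (lp - pp) * pd          # 4.0

L = numpy.arange(1.0, 6.0)
P = pv + pd * (L - pp)
assert numpy.allclose(lv + ld * (L - lp),   # world via the logical route
                      wv + wd * (P - wp))   # world via the physical route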
Example #4
def get_pha_data(arg, make_copy=True, use_background=False):
    """
    get_pha_data( filename [, make_copy=True [, use_background=False]])

    get_pha_data( PHACrate [, make_copy=True [, use_background=False]])
    """
    filename = ''
    close_dataset = False
    if type(arg) == str:
        phadataset = open_crate_dataset(
            arg, pycrates.phacratedataset.PHACrateDataset)

        if pycrates.is_pha(phadataset) != 1:
            raise IOErr('badfile', arg, "PHACrateDataset obj")

        filename = arg
        close_dataset = True

    elif pycrates.is_pha(arg) == 1:
        phadataset = arg
        filename = arg.get_filename()
        make_copy = False

    else:
        raise IOErr('badfile', arg, "PHACrateDataset obj")

    pha = _get_crate_by_blockname(phadataset, "SPECTRUM")

    if pha is None:
        pha = phadataset.get_crate(phadataset.get_current_crate())
        if not (pha.get_key('HDUCLAS1').value == 'SPECTRUM'
                or pha.get_key('HDUCLAS2').value == 'SPECTRUM'):
            pha = phadataset.get_crate(1)
            if not (pha.get_key('HDUCLAS1').value == 'SPECTRUM'
                    or pha.get_key('HDUCLAS2').value == 'SPECTRUM'):
                # If this is a background block it may be better to
                # move on to the next block.
                pha = None

    if use_background:

        # Used to read BKGs found in an additional block of
        # Chandra Level 3 PHA files
        for ii in range(phadataset.get_ncrates()):
            block = phadataset.get_crate(ii + 1)
            hduclas2 = block.get_key('HDUCLAS2')
            if hduclas2 is not None and hduclas2.value == 'BKG':
                pha = block

    if pha is None or pha.get_colnames() is None:
        raise IOErr('filenotfound', arg)

    keys = [
        'BACKFILE', 'ANCRFILE', 'RESPFILE', 'BACKSCAL', 'AREASCAL', 'EXPOSURE'
    ]

    keys_or_cols = ['BACKSCAL', 'BACKSCUP', 'BACKSCDN', 'AREASCAL']

    datasets = []

    # Calling phadataset.is_pha_type1() is unreliable when
    # both TYPE:I and TYPE:II keywords are in the header.
    # Here, I instead test for a column, SPEC_NUM, that can
    # *only* be present in Type II. SMD 05/15/13
    if _try_col(pha, 'SPEC_NUM') is None:
        data = {}

        # Keywords
        data['exposure'] = _try_key(pha, 'EXPOSURE', SherpaFloat)
        #data['poisserr'] = _try_key(pha, 'POISSERR', bool)
        data['backfile'] = _try_key(pha, 'BACKFILE')
        data['arffile'] = _try_key(pha, 'ANCRFILE')
        data['rmffile'] = _try_key(pha, 'RESPFILE')

        # Keywords or columns
        for name in keys_or_cols:
            key = name.lower()
            data[key] = _try_key(pha, name, SherpaFloat)
            if data[key] is None:
                data[key] = _try_col(pha, name, make_copy)

        data['header'] = _get_meta_data(pha)
        for key in keys:
            try:
                data['header'].pop(key)
            except KeyError:
                pass

        # Columns

        if not pha.column_exists('CHANNEL'):
            raise IOErr('reqcol', 'CHANNEL', filename)

        data['channel'] = _require_col(pha,
                                       'CHANNEL',
                                       make_copy,
                                       fix_type=True)
        # Make sure channel numbers, not indices
        if int(data['channel'][0]) == 0 or pha.get_column(
                'CHANNEL').get_tlmin() == 0:
            data['channel'] = data['channel'] + 1

        data['counts'] = None
        if pha.column_exists('COUNTS'):
            data['counts'] = _require_col(pha,
                                          'COUNTS',
                                          make_copy,
                                          fix_type=True)
        else:
            if not pha.column_exists('RATE'):
                raise IOErr('reqcol', 'COUNTS or RATE', filename)
            data['counts'] = _require_col(
                pha, 'RATE', make_copy, fix_type=True) * data['exposure']

        data['staterror'] = _try_col(pha, 'STAT_ERR', make_copy)
        data['syserror'] = _try_col(pha, 'SYS_ERR', make_copy)
        data['background_up'] = _try_col(pha,
                                         'BACKGROUND_UP',
                                         make_copy,
                                         fix_type=True)
        data['background_down'] = _try_col(pha,
                                           'BACKGROUND_DOWN',
                                           make_copy,
                                           fix_type=True)
        data['bin_lo'] = _try_col(pha, 'BIN_LO', make_copy, fix_type=True)
        data['bin_hi'] = _try_col(pha, 'BIN_HI', make_copy, fix_type=True)
        data['grouping'] = _try_col(pha, 'GROUPING', make_copy)
        data['quality'] = _try_col(pha, 'QUALITY', make_copy)

        datasets.append(data)

    else:
        # Type 2 PHA file support
        data = {}
        num = pha.get_nrows()

        # Keywords
        exposure = _try_key(pha, 'EXPOSURE', SherpaFloat)
        #poisserr = _try_key(pha, 'POISSERR', bool)
        backfile = _try_key(pha, 'BACKFILE')
        arffile = _try_key(pha, 'ANCRFILE')
        rmffile = _try_key(pha, 'RESPFILE')

        # Keywords or columns
        backscal = _try_key_list(pha, 'BACKSCAL', num)
        if backscal is None:
            backscal = _try_col_list(pha, 'BACKSCAL', num, make_copy)

        backscup = _try_key_list(pha, 'BACKSCUP', num)
        if backscup is None:
            backscup = _try_col_list(pha, 'BACKSCUP', num, make_copy)

        backscdn = _try_key_list(pha, 'BACKSCDN', num)
        if backscdn is None:
            backscdn = _try_col_list(pha, 'BACKSCDN', num, make_copy)

        areascal = _try_key_list(pha, 'AREASCAL', num)
        if areascal is None:
            areascal = _try_col_list(pha, 'AREASCAL', num, make_copy)

        # Columns

        if not pha.column_exists('CHANNEL'):
            raise IOErr('reqcol', 'CHANNEL', filename)

        channel = _require_col_list(pha,
                                    'CHANNEL',
                                    num,
                                    make_copy,
                                    fix_type=True)
        # Make sure channel numbers, not indices
        for ii in range(num):
            if int(channel[ii][0]) == 0:
                channel[ii] += 1

        counts = None
        if pha.column_exists('COUNTS'):
            counts = _require_col_list(pha,
                                       'COUNTS',
                                       num,
                                       make_copy,
                                       fix_type=True)
        else:
            if not pha.column_exists('RATE'):
                raise IOErr('reqcol', 'COUNTS or RATE', filename)
            counts = _require_col_list(
                pha, 'RATE', num, make_copy, fix_type=True) * exposure

        staterror = _try_col_list(pha, 'STAT_ERR', num, make_copy)
        syserror = _try_col_list(pha, 'SYS_ERR', num, make_copy)
        background_up = _try_col_list(pha,
                                      'BACKGROUND_UP',
                                      num,
                                      make_copy,
                                      fix_type=True)
        background_down = _try_col_list(pha,
                                        'BACKGROUND_DOWN',
                                        num,
                                        make_copy,
                                        fix_type=True)
        bin_lo = _try_col_list(pha, 'BIN_LO', num, make_copy, fix_type=True)
        bin_hi = _try_col_list(pha, 'BIN_HI', num, make_copy, fix_type=True)
        grouping = _try_col_list(pha, 'GROUPING', num, make_copy)
        quality = _try_col_list(pha, 'QUALITY', num, make_copy)

        orders = _try_key_list(pha, 'TG_M', num)
        if orders is None:
            orders = _try_col_list(pha, 'TG_M', num, make_copy)

        parts = _try_key_list(pha, 'TG_PART', num)
        if parts is None:
            parts = _try_col_list(pha, 'TG_PART', num, make_copy)

        specnums = _try_col_list(pha, 'SPEC_NUM', num, make_copy)
        srcids = _try_col_list(pha, 'TG_SRCID', num, make_copy)

        # Iterate over all rows of channels, counts, errors, etc.
        # Populate a list of dictionaries containing individual dataset info.
        for (bscal, bscup, bscdn, arsc, chan, cnt, staterr, syserr, backup,
             backdown, binlo, binhi, grp, qual, ordr, prt, specnum,
             srcid) in zip(backscal, backscup, backscdn, areascal, channel,
                           counts, staterror, syserror, background_up,
                           background_down, bin_lo, bin_hi, grouping, quality,
                           orders, parts, specnums, srcids):

            data = {}

            data['exposure'] = exposure
            #data['poisserr'] = poisserr
            data['backfile'] = backfile
            data['arffile'] = arffile
            data['rmffile'] = rmffile

            data['backscal'] = bscal
            data['backscup'] = bscup
            data['backscdn'] = bscdn
            data['areascal'] = arsc

            data['channel'] = chan
            data['counts'] = cnt
            data['staterror'] = staterr
            data['syserror'] = syserr
            data['background_up'] = backup
            data['background_down'] = backdown
            data['bin_lo'] = binlo
            data['bin_hi'] = binhi
            data['grouping'] = grp
            data['quality'] = qual
            data['header'] = _get_meta_data(pha)
            data['header']['TG_M'] = ordr
            data['header']['TG_PART'] = prt
            data['header']['SPEC_NUM'] = specnum
            data['header']['TG_SRCID'] = srcid

            for key in keys:
                try:
                    data['header'].pop(key)
                except KeyError:
                    pass

            datasets.append(data)

    if close_dataset:
        close_crate_dataset(phadataset)
    return datasets, filename
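
The channel renumbering above (shift 0-based indices to 1-based channel numbers when the first channel, or the column's TLMIN, is 0) in isolation, with a made-up channel array and a hypothetical TLMIN value:

import numpy

channel = numpy.arange(0, 1024)    # 0-based indices as read from the file
tlmin = 0                          # hypothetical TLMIN of the CHANNEL column

if int(channel[0]) == 0 or tlmin == 0:
    channel = channel + 1          # now 1..1024: channel numbers, not indices

print(channel[0], channel[-1])     # 1 1024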
Example #5
def _require_key(hdu, name, fix_type=False, dtype=SherpaFloat):
    key = _try_key(hdu, name, fix_type, dtype)
    if key is None:
        raise IOErr('nokeyword', hdu._file.name, name)
    return key
Example #6
def _require_tbl_col(hdu, name, dtype=SherpaFloat, fix_type=False):
    col = _try_tbl_col(hdu, name, dtype, fix_type)
    if len(col) > 0 and col[0] is None:
        raise IOErr('reqcol', name, hdu._file.name)
    return col
Example #7
        tbl = None
        try:
            tbl = open_crate(arg)
        except Exception as e:
            raise e

        close_dataset = True
        filename = tbl.get_filename()

        # Make a copy of the data, since we don't know that pycrates will
        # do something sensible wrt reference counting
    elif isinstance(arg, pycrates.TABLECrate):
        tbl = arg
        filename = arg.get_filename()
    else:
        raise IOErr('badfile', arg, 'TABLECrate obj')

    # Crates "caches" open files by their filename in memory.  If you try
    # to open a file multiple times (with DM syntax) it corrupts the Crate
    # in memory.  This is a work-around to open the CrateDataset without
    # DM syntax and iterate through the crates looking for the block
    # name that matches.
    if blockname is not None:
        crate = _get_crate_by_blockname(tbl.get_dataset(), blockname)
        tbl = crate or tbl

    hdr = {}
    if hdrkeys is None:
        #hdrkeys = tbl.get_keynames()
        hdrkeys = pycrates.get_key_names(tbl)
Example #8
def get_pha_data(arg, make_copy=False, use_background=False):
    """
    arg is a filename or an HDUList object
    """

    pha, filename = _get_file_contents(arg, exptype="BinTableHDU")

    try:
        if _has_hdu(pha, 'SPECTRUM'):
            hdu = pha['SPECTRUM']
        elif _is_ogip_type(pha, 'SPECTRUM'):
            hdu = pha[1]
        else:
            raise IOErr('notrsp', filename, "a PHA spectrum")

        if use_background:
            for block in pha:
                if _try_key(block, 'HDUCLAS2') == 'BKG':
                    hdu = block

        keys = [
            'BACKFILE', 'ANCRFILE', 'RESPFILE', 'BACKSCAL', 'AREASCAL',
            'EXPOSURE'
        ]
        datasets = []

        if _try_col(hdu, 'SPEC_NUM') is None:
            data = {}

            # Keywords
            data['exposure'] = _try_key(hdu, 'EXPOSURE', True, SherpaFloat)
            # data['poisserr'] = _try_key(hdu, 'POISSERR', True, bool)
            data['backfile'] = _try_key(hdu, 'BACKFILE')
            data['arffile'] = _try_key(hdu, 'ANCRFILE')
            data['rmffile'] = _try_key(hdu, 'RESPFILE')

            # Keywords or columns
            data['backscal'] = _try_col_or_key(hdu, 'BACKSCAL', fix_type=True)
            data['backscup'] = _try_col_or_key(hdu, 'BACKSCUP', fix_type=True)
            data['backscdn'] = _try_col_or_key(hdu, 'BACKSCDN', fix_type=True)
            data['areascal'] = _try_col_or_key(hdu, 'AREASCAL', fix_type=True)

            # Columns
            data['channel'] = _require_col(hdu, 'CHANNEL', fix_type=True)

            # Make sure channel numbers not indices
            chan = list(hdu.columns.names).index('CHANNEL') + 1
            tlmin = _try_key(hdu, 'TLMIN' + str(chan), True, SherpaUInt)
            if int(data['channel'][0]) == 0 or tlmin == 0:
                data['channel'] = data['channel'] + 1

            data['counts'] = _try_col(hdu, 'COUNTS', fix_type=True)
            data['staterror'] = _try_col(hdu, 'STAT_ERR')
            if data['counts'] is None:
                data['counts'] = _require_col(hdu, 'RATE',
                                              fix_type=True) * data['exposure']
                if data['staterror'] is not None:
                    data['staterror'] = data['staterror'] * data['exposure']
            data['syserror'] = _try_col(hdu, 'SYS_ERR')
            data['background_up'] = _try_col(hdu,
                                             'BACKGROUND_UP',
                                             fix_type=True)
            data['background_down'] = _try_col(hdu,
                                               'BACKGROUND_DOWN',
                                               fix_type=True)
            data['bin_lo'] = _try_col(hdu, 'BIN_LO', fix_type=True)
            data['bin_hi'] = _try_col(hdu, 'BIN_HI', fix_type=True)
            data['grouping'] = _try_col(hdu, 'GROUPING', SherpaInt)
            data['quality'] = _try_col(hdu, 'QUALITY', SherpaInt)
            data['header'] = _get_meta_data(hdu)
            for key in keys:
                try:
                    data['header'].pop(key)
                except KeyError:
                    pass

            if data['syserror'] is not None:
                # SYS_ERR is the fractional systematic error
                data['syserror'] = data['syserror'] * data['counts']

            datasets.append(data)

        else:
            data = {}
            # Type 2 PHA file support

            specnum = _try_col_or_key(hdu, 'SPEC_NUM')
            num = len(specnum)

            # Keywords
            exposure = _try_key(hdu, 'EXPOSURE', True, SherpaFloat)
            # poisserr = _try_key(hdu, 'POISSERR', True, bool)
            backfile = _try_key(hdu, 'BACKFILE')
            arffile = _try_key(hdu, 'ANCRFILE')
            rmffile = _try_key(hdu, 'RESPFILE')

            # Keywords or columns
            backscal = _try_vec_or_key(hdu, 'BACKSCAL', num, fix_type=True)
            backscup = _try_vec_or_key(hdu, 'BACKSCUP', num, fix_type=True)
            backscdn = _try_vec_or_key(hdu, 'BACKSCDN', num, fix_type=True)
            areascal = _try_vec_or_key(hdu, 'AREASCAL', num, fix_type=True)

            # Columns
            channel = _require_vec(hdu, 'CHANNEL', num, fix_type=True)

            # Make sure channel numbers not indices
            chan = list(hdu.columns.names).index('CHANNEL') + 1
            tlmin = _try_key(hdu, 'TLMIN' + str(chan), True, SherpaUInt)

            for ii in range(num):
                if int(channel[ii][0]) == 0:
                    channel[ii] += 1

            # if ((tlmin is not None) and tlmin == 0) or int(channel[0]) == 0:
            #     channel += 1

            counts = _try_vec(hdu, 'COUNTS', num, fix_type=True)
            staterror = _try_vec(hdu, 'STAT_ERR', num)

            # _try_vec can return an array of Nones.
            if numpy.equal(counts, None).any():
                counts = _require_vec(hdu, 'RATE', num,
                                      fix_type=True) * exposure
                if not numpy.equal(staterror, None).any():
                    staterror *= exposure

            syserror = _try_vec(hdu, 'SYS_ERR', num)
            background_up = _try_vec(hdu, 'BACKGROUND_UP', num, fix_type=True)
            background_down = _try_vec(hdu,
                                       'BACKGROUND_DOWN',
                                       num,
                                       fix_type=True)
            bin_lo = _try_vec(hdu, 'BIN_LO', num, fix_type=True)
            bin_hi = _try_vec(hdu, 'BIN_HI', num, fix_type=True)
            grouping = _try_vec(hdu, 'GROUPING', num, SherpaInt)
            quality = _try_vec(hdu, 'QUALITY', num, SherpaInt)

            orders = _try_vec(hdu, 'TG_M', num, SherpaInt)
            parts = _try_vec(hdu, 'TG_PART', num, SherpaInt)
            specnums = _try_vec(hdu, 'SPEC_NUM', num, SherpaInt)
            srcids = _try_vec(hdu, 'TG_SRCID', num, SherpaInt)

            # Iterate over all rows of channels, counts, errors, etc
            # Populate a list of dictionaries containing
            # individual dataset info
            for (bscal, bscup, bscdn, arsc, chan, cnt, staterr, syserr, backup,
                 backdown, binlo, binhi, group, qual, ordr, prt, specnum,
                 srcid) in zip(backscal, backscup, backscdn, areascal, channel,
                               counts, staterror, syserror, background_up,
                               background_down, bin_lo, bin_hi, grouping,
                               quality, orders, parts, specnums, srcids):
                data = {}

                data['exposure'] = exposure
                # data['poisserr'] = poisserr
                data['backfile'] = backfile
                data['arffile'] = arffile
                data['rmffile'] = rmffile

                data['backscal'] = bscal
                data['backscup'] = bscup
                data['backscdn'] = bscdn
                data['areascal'] = arsc

                data['channel'] = chan
                data['counts'] = cnt
                data['staterror'] = staterr
                data['syserror'] = syserr
                data['background_up'] = backup
                data['background_down'] = backdown
                data['bin_lo'] = binlo
                data['bin_hi'] = binhi
                data['grouping'] = group
                data['quality'] = qual
                data['header'] = _get_meta_data(hdu)
                data['header']['TG_M'] = ordr
                data['header']['TG_PART'] = prt
                data['header']['SPEC_NUM'] = specnum
                data['header']['TG_SRCID'] = srcid

                for key in keys:
                    try:
                        data['header'].pop(key)
                    except KeyError:
                        pass

                if syserr is not None:
                    # SYS_ERR is the fractional systematic error
                    data['syserror'] = syserr * cnt

                datasets.append(data)

    finally:
        pha.close()

    return datasets, filename
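
The SYS_ERR handling above converts the fractional systematic error from the file into an absolute error by scaling with the counts; a toy sketch with invented values:

import numpy

counts = numpy.array([100.0, 400.0, 900.0])     # invented channel counts
sys_err = numpy.array([0.05, 0.05, 0.05])       # fractional, per the standard

syserror = sys_err * counts                     # absolute systematic error
print(syserror)                                 # [ 5. 20. 45.]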
Example #9
def calc_kcorr(data, model, z, obslo, obshi, restlo=None, resthi=None):
    """Calculate the K correction for a model.

    The K correction ([1]_, [2]_, [3]_, [4]_) is the numeric
    factor applied to measured energy fluxes to convert values in
    an observed energy band to those in a rest-frame energy band
    (that is, to correct for the change in spectral shape between
    the rest-frame and observed-frame bands). This is often used
    when converting a flux into a luminosity.

    Parameters
    ----------
    data
       The data object to use.
    model
       The source expression: this should not include any instrument
       responses.
    z : number or array, >= 0
       The redshift, or redshifts, of the source.
    obslo : number
       The minimum energy of the observed band.
    obshi : number
       The maximum energy of the observed band, which must
       be larger than `obslo`.
    restlo : number or ``None``
       The minimum energy of the rest-frame band. If ``None`` then
       use `obslo`.
    resthi : number or ``None``
       The maximum energy of the rest-frame band. It must be
       larger than `restlo`. If ``None`` then use `obshi`.

    Returns
    -------
    kz : number or array of numbers

    Notes
    -----
    This is only defined when the analysis is in 'energy' units.

    If the model contains a redshift parameter then it should
    be set to ``0``, rather than the source redshift.

    If the source model is at zero redshift, the observed energy
    band is olo to ohi, and the rest frame band is rlo to rhi
    (which need not match the observed band), then the K
    correction at a redshift z can be calculated as::

      frest = calc_energy_flux(data, model, rlo, rhi)
      fobs  = calc_energy_flux(data, model, olo*(1+z), ohi*(1+z))
      kz    = frest / fobs

    The energy ranges used - rlo to rhi and olo*(1+z) to ohi*(1+z)
    - should be fully covered by the data grid, otherwise the flux
    calculation will be truncated at the grid boundaries, leading
    to incorrect results.

    References
    ----------

    .. [1] "The K correction", Hogg, D.W., et al.
           http://arxiv.org/abs/astro-ph/0210394

    .. [2] Appendix B of Jones et al. 1998, ApJ, vol 495,
           p. 100-114.
           http://adsabs.harvard.edu/abs/1998ApJ...495..100J

    .. [3] "K and evolutionary corrections from UV to IR",
           Poggianti, B.M., A&AS, 1997, vol 122, p. 399-407.
           http://adsabs.harvard.edu/abs/1997A%26AS..122..399P

    .. [4] "Galactic evolution and cosmology - Probing the
           cosmological deceleration parameter", Yoshii, Y. &
           Takahara, F., ApJ, 1988, vol 326, p. 1-18.
           http://adsabs.harvard.edu/abs/1988ApJ...326....1Y

    """

    if restlo is None:
        restlo = obslo
    if resthi is None:
        resthi = obshi

    if numpy.isscalar(z):
        z = numpy.array([z], dtype=float)
    else:
        z = numpy.asarray(z)

    if (z < 0).any():
        raise IOErr('z<=0')

    if obslo <= 0 or restlo <= 0 or obshi <= obslo or resthi <= restlo:
        raise IOErr('erange')

    if hasattr(data, 'get_response'):
        arf, rmf = data.get_response()
        elo = data.bin_lo
        ehi = data.bin_hi
        if arf is not None:
            elo = arf.energ_lo
            ehi = arf.energ_hi
        elif rmf is not None:
            elo = rmf.energ_lo
            ehi = rmf.energ_hi
    else:
        elo, ehi = data.get_indep()

    if elo is None or ehi is None:
        raise DataErr('noenergybins', data.name)

    emin = elo[0]
    emax = ehi[-1]

    if restlo < emin or resthi > emax:
        raise IOErr('energoverlap', emin, emax, 'rest-frame', restlo, resthi,
                    '')

    if obslo * (1.0 + z.min()) < emin:
        raise IOErr('energoverlap', emin, emax, 'observed-frame', restlo,
                    resthi, "at a redshift of %f" % z.min())

    if obshi * (1.0 + z.max()) > emax:
        raise IOErr('energoverlap', emin, emax, 'observed-frame', restlo,
                    resthi, "at a redshift of %f" % z.max())

    zplus1 = z + 1.0
    flux_rest = _flux(data, restlo, resthi, model, eflux=True)
    obs = numpy.asarray([_flux(data, obslo * zz, obshi * zz, model, eflux=True)
                         for zz in zplus1], dtype=float)
    kcorr = flux_rest / obs

    if len(kcorr) == 1:
        return kcorr[0]

    return kcorr
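
As a sanity check of the frest / fobs definition in the Notes: for a power-law photon spectrum with index 2 the energy-flux K correction is exactly 1 when the rest and observed bands match, which a crude numeric integration reproduces (the grid, band, and model here are invented for illustration):

import numpy

def eflux(lo, hi, gamma=2.0, nbins=10000):
    # Energy flux of a power-law photon spectrum E**-gamma over [lo, hi],
    # integrated on a crude logarithmic grid.
    edges = numpy.logspace(numpy.log10(lo), numpy.log10(hi), nbins + 1)
    emid = numpy.sqrt(edges[:-1] * edges[1:])
    return numpy.sum(emid * emid ** -gamma * numpy.diff(edges))

z = 0.5
frest = eflux(0.5, 2.0)                          # rest-frame band
fobs = eflux(0.5 * (1 + z), 2.0 * (1 + z))       # observed band, redshifted
print(frest / fobs)                              # ~1.0 for gamma = 2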
Example #10
def _pack_pha(dataset):
    if not isinstance(dataset, DataPHA):
        raise IOErr('notpha', dataset.name)

    data = {}

    arf, rmf = dataset.get_response()
    bkg = dataset.get_background()

    # Header Keys
    header = {}
    if hasattr(dataset, 'header'):  #and type(dataset.header) is dict:
        header = dataset.header.copy()

    header['EXPOSURE'] = getattr(dataset, 'exposure', 'none')

    # Strip any leading directory path from the response/background
    # names before storing them as header keywords.
    for key, obj in (('RESPFILE', rmf), ('BACKFILE', bkg), ('ANCRFILE', arf)):
        if obj is None:
            continue
        name = getattr(obj, 'name', 'none')
        if name is not None and '/' in name:
            name = name.split('/').pop()
        header[key] = name

    # Columns
    col_names = [
        'channel', 'counts', 'stat_err', 'sys_err', 'bin_lo', 'bin_hi',
        'grouping', 'quality'
    ]

    data['channel'] = getattr(dataset, 'channel', None)
    data['counts'] = getattr(dataset, 'counts', None)
    data['stat_err'] = getattr(dataset, 'staterror', None)
    data['sys_err'] = getattr(dataset, 'syserror', None)
    data['bin_lo'] = getattr(dataset, 'bin_lo', None)
    data['bin_hi'] = getattr(dataset, 'bin_hi', None)
    data['grouping'] = getattr(dataset, 'grouping', None)
    data['quality'] = getattr(dataset, 'quality', None)

    backscal = getattr(dataset, 'backscal', None)
    if backscal is not None:
        if numpy.isscalar(backscal):
            header['BACKSCAL'] = backscal
        else:
            data['backscal'] = backscal
            col_names.append('backscal')

    areascal = getattr(dataset, 'areascal', None)
    if areascal is not None:
        if numpy.isscalar(areascal):
            header['AREASCAL'] = areascal
        else:
            data['areascal'] = areascal
            col_names.append('areascal')

    return data, col_names, header
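
The scalar-versus-array routing above (a scalar BACKSCAL/AREASCAL becomes a header keyword, an array becomes a column) in isolation; `route` is a hypothetical helper written for this sketch, not part of the module:

import numpy

def route(value, header, data, name='backscal'):
    # Scalar -> header keyword, array -> table column, as in _pack_pha.
    if numpy.isscalar(value):
        header[name.upper()] = value
    else:
        data[name] = value

header, data = {}, {}
route(1.0, header, data)             # scalar BACKSCAL -> header keyword
route(numpy.ones(10), header, data)  # vector BACKSCAL -> column
print(header, list(data))            # {'BACKSCAL': 1.0} ['backscal']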
Example #11
def _flux(data, lo, hi, src, eflux=False, srcflux=False):
    lo, hi = bounds_check(lo, hi)

    try:
        method = data._get_indep
    except AttributeError:
        method = data.get_indep

    axislist = method(filter=False)
    dim = numpy.asarray(axislist).squeeze().ndim
    if dim > 2:
        raise IOErr('>axes', "2")

    # assume this should not happen, so we do not have to worry
    # about a nice error message
    assert dim > 0

    # To make things simpler, evaluate on the full grid
    y = src(*axislist)

    if srcflux and dim == 2:
        y /= numpy.asarray(axislist[1] - axislist[0])

    if eflux:
        # for energy flux, the sum of grid below must be in keV.
        #
        energ = []
        convert = hasattr(data, 'units') and data.units == 'wavelength'

        for axis in axislist:
            grid = axis
            if convert:
                grid = data._hc / grid
            energ.append(grid)

        if dim == 2:
            ecorr = 0.5 * (energ[0] + energ[1])
        else:
            # why multiply by 0.5?
            ecorr = 0.5 * energ[0]

        y *= ecorr

    # What bins do we use for the calculation? Linear interpolation
    # is used for bin edges (for integrated data sets)
    #
    if dim == 1:
        mask = filter_bins((lo, ), (hi, ), (axislist[0], ))
        assert mask is not None

        # no bin found
        if numpy.all(~mask):
            return 0.0

        # convert boolean to numbers
        scale = 1.0 * mask

    else:
        scale = range_overlap_1dint(axislist, lo, hi)
        if scale is None:
            return 0.0

        assert scale.max() > 0

    # Originally a flux density was calculated if both lo and hi
    # fell in the same bin, but this has been changed so that
    # we only calculate a density if the lo and hi values are the
    # same (which is set by bounds_check when a density is requested).
    #
    if lo is not None and dim == 2 and lo == hi:
        assert scale.sum() == 1, 'programmer error: sum={}'.format(scale.sum())
        y /= numpy.abs(axislist[1] - axislist[0])

    flux = (scale * y).sum()
    if eflux:
        flux *= _charge_e

    return flux
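
The `scale = 1.0 * mask` step above turns a boolean bin mask into per-bin weights so the flux is always `(scale * y).sum()`; a toy sketch with invented values:

import numpy

y = numpy.array([1.0, 2.0, 3.0, 4.0])            # model evaluated per bin
mask = numpy.array([False, True, True, False])   # bins selected by lo/hi

scale = 1.0 * mask                               # -> [0., 1., 1., 0.]
print((scale * y).sum())                         # 5.0: only selected bins

# For integrated grids the weights need not be 0/1: range_overlap_1dint
# returns fractional overlaps, and (scale * y).sum() still applies.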
Example #12
def write_arrays(filename,
                 args,
                 fields=None,
                 sep=' ',
                 comment='#',
                 clobber=False,
                 linebreak='\n',
                 format='%g'):
    """Write a list of arrays to an ASCII file.

    Parameters
    ----------
    filename : str
       The name of the file to write the array to.
    args : array_like
       The arrays to write out.
    fields : array_like of str
       The column names (should match the size of ``args`` if given).
    sep : str, optional
       The separator character. The default is ``' '``.
    comment : str, optional
       The comment character. The default is ``'#'``. This is only used
       to write out the column names when ``fields`` is not None.
    clobber : bool, optional
       If ``filename`` is not ``None``, then this flag controls
       whether an existing file can be overwritten (``True``)
       or if it raises an exception (``False``, the default
       setting).
    linebreak : str, optional
       Indicate a new line. The default is ``'\\n'``.
    format : str, optional
       The format used to write out the numeric values. The
       default is ``'%g'``.

    Raises
    ------
    sherpa.utils.err.IOErr
       If ``filename`` already exists and ``clobber`` is ``False``
       or if there is no data to write.

    See Also
    --------
    get_ascii_data

    Examples
    --------

    Write the x and y arrays to the file 'src.dat':

    >>> write_arrays('src.dat', [x, y])

    Use the column names "r" and "surbri" for the columns:

    >>> write_arrays('prof.txt', [x, y], fields=["r", "surbri"],
                     clobber=True)

    """
    if os.path.isfile(filename) and not clobber:
        raise IOErr("filefound", filename)

    if not numpy.iterable(args) or len(args) == 0:
        raise IOErr('noarrayswrite')

    if not numpy.iterable(args[0]):
        raise IOErr('noarrayswrite')

    size = len(args[0])
    for arg in args:
        if not numpy.iterable(arg):
            raise IOErr('noarrayswrite')
        elif len(arg) != size:
            raise IOErr('arraysnoteq')

    args = numpy.column_stack(numpy.asarray(args))

    with open(filename, 'w') as f:
        if fields is not None:
            f.write(comment + sep.join(fields) + linebreak)

        lines = []
        for arg in args:
            line = [format % elem for elem in arg]
            lines.append(sep.join(line))

        f.write(linebreak.join(lines))

        # end the file with a newline
        f.write(linebreak)
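
The `numpy.column_stack` call above turns the list of equal-length input arrays into per-line rows for the formatting loop:

import numpy

x = numpy.array([1.0, 2.0, 3.0])
y = numpy.array([10.0, 20.0, 30.0])

rows = numpy.column_stack(numpy.asarray([x, y]))
print(rows)
# [[ 1. 10.]
#  [ 2. 20.]
#  [ 3. 30.]]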
Example #13
def get_ascii_data(filename,
                   ncols=1,
                   colkeys=None,
                   sep=' ',
                   dstype=Data1D,
                   comment='#',
                   require_floats=True):
    r"""Read in columns from an ASCII file.

    Parameters
    ----------
    filename : str
       The name of the ASCII file to read in.
    ncols : int, optional
       The number of columns to read in (the first ``ncols`` columns
       in the file). This is ignored if ``colkeys`` is given.
    colkeys : array of str, optional
       An array of the column name to read in. The default is
       ``None``.
    sep : str, optional
       The separator character. The default is ``' '``.
    dstype : data class to use, optional
       Used to check that the data file contains enough columns.
    comment : str, optional
       The comment character. The default is ``'#'``.
    require_floats : bool, optional
       If ``True`` (the default), non-numeric data values will
       raise a `ValueError`.

    Returns
    -------
    (colnames, coldata, filename)
       The column names read in, the data for the columns
       as an array, with each element being the data for the column
       (the order matches ``colnames``), and the name of the file.

    Raises
    ------
    sherpa.utils.IOErr
       Raised if a requested column is missing or the file appears
       to be a binary file.
    ValueError
       If a column value can not be converted into a numeric value
       and the ``require_floats`` parameter is True.

    See Also
    --------
    read_arrays, read_data, write_arrays, write_data

    Notes
    -----
    The file is processed by reading in each line, stripping out any
    unsupported characters (replacing them by the ``sep`` argument),
    skipping empty lines, and then identifying comment and data lines.

    The list of unsupported characters are: ``\t``, ``\n``,
    ``\r``, comma, semi-colon, colon, space, and ``|``.

    The last comment line before the data is used to define the
    column names, splitting the line by the ``sep`` argument.
    If there are no comment lines then the columns are named
    starting at ``col1``, ``col2``, up to the number of columns.

    Data lines are separated into columns - splitting by the
    ``sep`` argument - and then converted to NumPy arrays.
    If the ``require_floats`` argument is ``True`` then the
    column will be converted to the `sherpa.utils.SherpaFloat`
    type, with an error raised if this fails.

    An error is raised if the number of columns per row
    is not constant.

    If the ``colkeys`` argument is used then a case-sensitive
    match is used to determine what columns to return.

    Examples
    --------

    Read in the first column from the file:

    >>> (colnames, coldata, fname) = get_ascii_data('src.dat')

    Read in the first three columns from the file:

    >>> colinfo = get_ascii_data('src.dat', ncols=3)

    Read in a histogram data set, using the columns XLO, XHI,
    and Y:

    >>> cols = ['XLO', 'XHI', 'Y']
    >>> res = get_ascii_data('hist.dat', colkeys=cols,
                             dstype=sherpa.data.Data1DInt)

    Read in the first and third column from the file cols.dat,
    where the file has no header information:

    >>> res = get_ascii_data('cols.dat', colkeys=['col1', 'col3'])

    """

    if is_binary_file(filename):
        raise IOErr('notascii', filename)

    names, args = read_file_data(filename, sep, comment, require_floats)

    if colkeys is None:
        kwargs = []
        if ncols != 1:
            _check_args(ncols, dstype)
        kwargs.extend(args[:ncols])
        return (names, kwargs, filename)

    kwargs = []
    colkeys = list(colkeys)

    if len(names) > len(args):
        raise IOErr('toomanycols')

    assert (len(names) <= len(args))

    for key in colkeys:
        if key not in names:
            raise IOErr('reqcol', key, numpy.asarray(names, numpy.str_))
        kwargs.append(args[names.index(key)])

    _check_args(len(kwargs), dstype)
    return (colkeys, kwargs, filename)
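
A much-simplified standalone reader illustrating the parsing rules from the Notes (the last comment line before the data names the columns; data rows are split on `sep`); this is a sketch, not the actual `read_file_data` implementation:

import io
import numpy

def read_simple_ascii(fh, sep=' ', comment='#'):
    names, rows = None, []
    for line in fh:
        line = line.strip()
        if not line:
            continue
        if line.startswith(comment):
            # the last comment line seen names the columns
            names = line.lstrip(comment).strip().split(sep)
        else:
            rows.append([float(v) for v in line.split(sep)])
    coldata = numpy.asarray(rows).T
    if names is None:
        names = ['col{}'.format(i + 1) for i in range(len(coldata))]
    return names, coldata

print(read_simple_ascii(io.StringIO("# X Y\n1 2\n3 4\n5 6\n")))
# (['X', 'Y'], array([[1., 3., 5.],
#        [2., 4., 6.]]))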
Example #14
def get_image_data(arg, make_copy=False):
    """
    arg is a filename or an HDUList object
    """
    hdu, filename = _get_file_contents(arg)

    #   FITS uses logical-to-world where we use physical-to-world.
    #   For all transforms, update their physical-to-world
    #   values from their logical-to-world values.
    #   Find the matching physical transform
    #      (same axis no, but sub = 'P' )
    #   and use it for the update.
    #   Physical tfms themselves do not get updated.
    #
    #  Fill the physical-to-world transform given the
    #  logical-to-world and the associated logical-to-physical.
    #      W = wv + wd * ( P - wp )
    #      P = pv + pd * ( L - pp )
    #      W = lv + ld * ( L - lp )
    # Then
    #      L = pp + ( P - pv ) / pd
    # so   W = lv + ld * ( pp + (P-pv)/pd - lp )
    #        = lv + ( ld / pd ) * ( P - [ pv +  (lp-pp)*pd ] )
    # Hence
    #      wv = lv
    #      wd = ld / pd
    #      wp = pv + ( lp - pp ) * pd

    #  EG suppose phys-to-world is
    #         W =  1000 + 2.0 * ( P - 4.0 )
    #  and we bin and scale to generate a logical-to-phys of
    #         P =  20 + 4.0 * ( L - 10 )
    #  Then the combined logical-to-world is
    #         W = 1000 + 2.0 * ( (20 - 4) - 4.0 * 10 ) + (2.0 * 4.0) * L
    #           = 952 + 8.0 * L  =  1000 + 8.0 * ( L - 6 )
    #

    try:
        data = {}

        img = hdu[0]
        if hdu[0].data is None:
            img = hdu[1]
            if hdu[1].data is None:
                raise IOErr('badimg', '')

        data['y'] = numpy.asarray(img.data)

        cdeltp = _get_wcs_key(img, 'CDELT1P', 'CDELT2P')
        crpixp = _get_wcs_key(img, 'CRPIX1P', 'CRPIX2P')
        crvalp = _get_wcs_key(img, 'CRVAL1P', 'CRVAL2P')
        cdeltw = _get_wcs_key(img, 'CDELT1', 'CDELT2')
        crpixw = _get_wcs_key(img, 'CRPIX1', 'CRPIX2')
        crvalw = _get_wcs_key(img, 'CRVAL1', 'CRVAL2')

        # proper calculation of cdelt wrt PHYSICAL coords
        if (isinstance(cdeltw, numpy.ndarray)
                and isinstance(cdeltp, numpy.ndarray)):
            cdeltw = cdeltw / cdeltp

        # proper calculation of crpix wrt PHYSICAL coords
        if (isinstance(crpixw, numpy.ndarray)
                and isinstance(crvalp, numpy.ndarray)
                and isinstance(cdeltp, numpy.ndarray)
                and isinstance(crpixp, numpy.ndarray)):
            crpixw = crvalp + (crpixw - crpixp) * cdeltp

        sky = None
        if (transformstatus and isinstance(cdeltp, numpy.ndarray)
                and isinstance(crpixp, numpy.ndarray)
                and isinstance(crvalp, numpy.ndarray)):
            sky = WCS('physical', 'LINEAR', crvalp, crpixp, cdeltp)

        eqpos = None
        if (transformstatus and isinstance(cdeltw, numpy.ndarray)
                and isinstance(crpixw, numpy.ndarray)
                and isinstance(crvalw, numpy.ndarray)):
            eqpos = WCS('world', 'WCS', crvalw, crpixw, cdeltw)

        data['sky'] = sky
        data['eqpos'] = eqpos
        data['header'] = _get_meta_data(img)

        keys = [
            'MTYPE1', 'MFORM1', 'CTYPE1P', 'CTYPE2P', 'WCSNAMEP', 'CDELT1P',
            'CDELT2P', 'CRPIX1P', 'CRPIX2P', 'CRVAL1P', 'CRVAL2P', 'MTYPE2',
            'MFORM2', 'CTYPE1', 'CTYPE2', 'CDELT1', 'CDELT2', 'CRPIX1',
            'CRPIX2', 'CRVAL1', 'CRVAL2', 'CUNIT1', 'CUNIT2', 'EQUINOX'
        ]

        for key in keys:
            try:
                data['header'].pop(key)
            except KeyError:
                pass

    finally:
        hdu.close()

    return data, filename
Example #15
def set_image_data(filename,
                   data,
                   header,
                   ascii=False,
                   clobber=False,
                   packup=False):

    if not packup and os.path.isfile(filename) and not clobber:
        raise IOErr('filefound', filename)

    img = pycrates.IMAGECrate()

    # Write Image Header Keys
    for key in header.keys():
        if header[key] is None:
            continue
        _set_key(img, key, header[key])

    # Write Image WCS Header Keys
    if data['eqpos'] is not None:
        cdeltw = data['eqpos'].cdelt
        crvalw = data['eqpos'].crval
        crpixw = data['eqpos'].crpix
        equin = data['eqpos'].equinox

    if data['sky'] is not None:
        cdeltp = data['sky'].cdelt
        crvalp = data['sky'].crval
        crpixp = data['sky'].crpix

        _set_key(img, 'MTYPE1', 'sky     ')
        _set_key(img, 'MFORM1', 'x,y     ')
        _set_key(img, 'CTYPE1P', 'x       ')
        _set_key(img, 'CTYPE2P', 'y       ')
        _set_key(img, 'WCSNAMEP', 'PHYSICAL')
        _set_key(img, 'CDELT1P', cdeltp[0])
        _set_key(img, 'CDELT2P', cdeltp[1])
        _set_key(img, 'CRPIX1P', crpixp[0])
        _set_key(img, 'CRPIX2P', crpixp[1])
        _set_key(img, 'CRVAL1P', crvalp[0])
        _set_key(img, 'CRVAL2P', crvalp[1])

        if data['eqpos'] is not None:
            # Simply the inverse of the read transformations in
            # get_image_data
            cdeltw = cdeltw * cdeltp
            crpixw = (crpixw - crvalp) / cdeltp + crpixp

    if data['eqpos'] is not None:
        _set_key(img, 'MTYPE2', 'EQPOS   ')
        _set_key(img, 'MFORM2', 'RA,DEC  ')
        _set_key(img, 'CTYPE1', 'RA---TAN')
        _set_key(img, 'CTYPE2', 'DEC--TAN')
        _set_key(img, 'CDELT1', cdeltw[0])
        _set_key(img, 'CDELT2', cdeltw[1])
        _set_key(img, 'CRPIX1', crpixw[0])
        _set_key(img, 'CRPIX2', crpixw[1])
        _set_key(img, 'CRVAL1', crvalw[0])
        _set_key(img, 'CRVAL2', crvalw[1])
        _set_key(img, 'EQUINOX', equin)

    # Write Image pixel values
    pix_col = pycrates.CrateData()
    pix_col.values = data['pixels']
    img.add_image(pix_col)

    if packup:
        return img

    # Writing images out in ASCII format is not supported here.
    if ascii and '[' not in filename and ']' not in filename:
        raise IOErr('writenoimg')

    img.write(filename, clobber=True)
    close_crate_dataset(img.get_dataset())
Example #16
def get_rmf_data(arg, make_copy=False):
    """arg is a filename or a HDUList object.

    Notes
    -----
    The RMF format is described in [1]_.

    References
    ----------

    .. [1] OGIP Calibration Memo CAL/GEN/92-002, "The Calibration
           Requirements for Spectral Analysis (Definition of RMF and
           ARF file formats)", Ian M. George, Keith A. Arnaud,
           Bill Pence, Laddawan Ruamsuwan and Michael F. Corcoran,
           https://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/memos/cal_gen_92_002/cal_gen_92_002.html

    """

    rmf, filename = _get_file_contents(arg,
                                       exptype="BinTableHDU",
                                       nobinary=True)

    try:
        if _has_hdu(rmf, 'MATRIX'):
            hdu = rmf['MATRIX']
        elif _has_hdu(rmf, 'SPECRESP MATRIX'):
            hdu = rmf['SPECRESP MATRIX']
        elif _has_hdu(rmf, 'AXAF_RMF'):
            hdu = rmf['AXAF_RMF']
        elif _is_ogip_type(rmf, 'RESPONSE', bltype2='RSP_MATRIX'):
            hdu = rmf[1]
        else:
            raise IOErr('notrsp', filename, 'an RMF')

        data = {}

        data['detchans'] = SherpaUInt(_require_key(hdu, 'DETCHANS'))
        data['energ_lo'] = _require_col(hdu, 'ENERG_LO', fix_type=True)
        data['energ_hi'] = _require_col(hdu, 'ENERG_HI', fix_type=True)
        data['n_grp'] = _require_col(hdu,
                                     'N_GRP',
                                     fix_type=True,
                                     dtype=SherpaUInt)
        data['f_chan'] = _require_vec(hdu,
                                      'F_CHAN',
                                      fix_type=True,
                                      dtype=SherpaUInt)
        data['n_chan'] = _require_vec(hdu,
                                      'N_CHAN',
                                      fix_type=True,
                                      dtype=SherpaUInt)

        # Read MATRIX as-is -- we will flatten it below, because
        # we need to remove all rows corresponding to n_grp[row] == 0
        #
        # A missing MATRIX column is reported as an error below, once
        # the file has been closed.
        #
        data['matrix'] = None
        if 'MATRIX' in hdu.columns.names:
            data['matrix'] = hdu.data.field('MATRIX')

        data['header'] = _get_meta_data(hdu)
        data['header'].pop('DETCHANS')

        # Beginning of non-Chandra RMF support
        fchan_col = list(hdu.columns.names).index('F_CHAN') + 1
        tlmin = _try_key(hdu, 'TLMIN' + str(fchan_col), True, SherpaUInt)

        if tlmin is not None:
            data['offset'] = tlmin
        else:
            # QUS: should this actually be an error, rather than just
            #      something that is logged to screen?
            error("Failed to locate TLMIN keyword for F_CHAN" +
                  " column in RMF file '%s'; " % filename +
                  'Update the offset value in the RMF data set to' +
                  ' appropriate TLMIN value prior to fitting')

        if _has_hdu(rmf, 'EBOUNDS'):
            hdu = rmf['EBOUNDS']
            data['e_min'] = _try_col(hdu, 'E_MIN', fix_type=True)
            data['e_max'] = _try_col(hdu, 'E_MAX', fix_type=True)

            # Beginning of non-Chandra RMF support
            chan_col = list(hdu.columns.names).index('CHANNEL') + 1
            tlmin = _try_key(hdu, 'TLMIN' + str(chan_col), True, SherpaUInt)
            if tlmin is not None:
                data['offset'] = tlmin

        else:
            data['e_min'] = None
            data['e_max'] = None
    finally:
        rmf.close()

    # Remove any rows from the MATRIX and F_CHAN/N_CHAN where N_GRP is 0,
    # since they do not add any data. This is to match the Crates backend.
    # Note that crates uses the sherpa.astro.utils.resp_init routine,
    # but it's not clear why, so it is not used here for now.
    #
    # The MATRIX column is required to build a response.
    if data['matrix'] is None:
        raise IOErr('reqcol', 'MATRIX', filename)

    good = (data['n_grp'] > 0)
    data['matrix'] = data['matrix'][good]

    if isinstance(data['matrix'], _VLF):
        data['matrix'] = numpy.concatenate(
            [numpy.asarray(row) for row in data['matrix']])

    else:
        # Flatten the array. There are two cases here:
        # a) the full matrix is given (that is, n_grp is 1 and n_chan
        #    = number of channels for each row)
        # b) a rectangular matrix is given, but a row can contain
        #    unused data (outside the f_chan range)
        #
        # Case a can be handled easily, but it's probably not worth
        # having a special case for this.
        #
        matrix = data['matrix']
        n_grp = data['n_grp'][good]
        f_chan = data['f_chan'][good]
        n_chan = data['n_chan'][good]

        rowdata = []
        for mrow, ng, ncs in zip(matrix, n_grp, n_chan):
            # An RMF with ng > 1 is needed to fully test this branch.
            if ng == 1:
                ncs = [ncs]

            start = 0
            for nc in ncs:
                # n_chan can be an unsigned integer, and adding a Python
                # integer to a NumPy unsigned integer can promote the
                # result to a float, so convert explicitly (numpy.int
                # was removed in recent NumPy releases).
                end = start + int(nc)

                # "perfect" RMFs may have mrow as a scalar
                try:
                    rdata = mrow[start:end]
                except IndexError:
                    if start != 0 or end != 1:
                        raise IOErr('bad', 'format',
                                    'MATRIX column formatting')

                    rdata = [mrow]

                rowdata.append(rdata)
                start = end

        data['matrix'] = numpy.concatenate(rowdata)

    data['matrix'] = data['matrix'].astype(SherpaFloat)

    # Flatten the f_chan and n_chan vectors into 1D arrays, group by
    # group, to match what the crates backend does.
    #
    if data['f_chan'].ndim > 1 and data['n_chan'].ndim > 1:
        f_chan = []
        n_chan = []
        for grp, fch, nch in zip(data['n_grp'], data['f_chan'],
                                 data['n_chan']):
            for i in range(grp):
                f_chan.append(fch[i])
                n_chan.append(nch[i])

        # This automatically filters out rows where N_GRP is 0.
        #
        data['f_chan'] = numpy.asarray(f_chan, SherpaUInt)
        data['n_chan'] = numpy.asarray(n_chan, SherpaUInt)
    else:
        if len(data['n_grp']) == len(data['f_chan']):
            # filter out groups with zeroes.
            good = (data['n_grp'] > 0)
            data['f_chan'] = data['f_chan'][good]
            data['n_chan'] = data['n_chan'][good]

    return data, filename
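
The flattening performed above is easier to see with a toy example; the following standalone sketch (made-up numbers, one group per row) mirrors the per-row slicing used when the matrix is stored rectangularly:

import numpy

n_chan = numpy.array([3, 2], dtype=numpy.uint32)
# Rectangular storage: each row is padded out to the widest group.
matrix = numpy.array([[0.2, 0.5, 0.3],
                      [0.6, 0.4, 0.0]])

rowdata = []
for mrow, nc in zip(matrix, n_chan):
    # Keep the first n_chan values of each row; the rest is padding.
    # (The real loop above also iterates over groups when n_grp > 1.)
    rowdata.append(mrow[:int(nc)])

flat = numpy.concatenate(rowdata)
print(flat)  # [0.2 0.5 0.3 0.6 0.4]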
Example #17
def get_table_data(arg,
                   ncols=1,
                   colkeys=None,
                   make_copy=True,
                   fix_type=True,
                   blockname=None,
                   hdrkeys=None):
    """
    get_table_data( filename , ncols=1 [, colkeys=None [, make_copy=True [,
                    fix_type=True [, blockname=None [, hdrkeys=None ]]]]])

    get_table_data( TABLECrate , ncols=1 [, colkeys=None [, make_copy=True [,
                    fix_type=True [, blockname=None [, hdrkeys=None ] ]]]])
    """
    filename = ''
    close_dataset = False
    if isinstance(arg, str):

        arg = get_filename_from_dmsyntax(arg)
        tbl = open_crate(arg)
        if not isinstance(tbl, pycrates.TABLECrate):
            close_crate_dataset(tbl.get_dataset())
            raise IOErr('badfile', arg, 'TABLECrate obj')

        filename = tbl.get_filename()
        close_dataset = True

        # Make a copy of the data, since we don't know that pycrates will
        # do something sensible wrt reference counting
    elif isinstance(arg, pycrates.TABLECrate):
        tbl = arg
        filename = arg.get_filename()
        make_copy = False
    else:
        raise IOErr('badfile', arg, 'TABLECrate obj')

    # Crates "caches" open files by their filename in memory.  If you try
    # to open a file multiple times (with DM syntax) it corrupts the Crate
    # in memory.  This is a work-around to open the CrateDataset without
    # DM syntax and iterate through the crates looking for the block
    # name that matches.
    if blockname is not None:
        crate = _get_crate_by_blockname(tbl.get_dataset(), blockname)
        tbl = crate or tbl

    cnames = list(pycrates.get_col_names(tbl, vectors=False, rawonly=True))

    if colkeys is not None:
        colkeys = [str(name).strip() for name in list(colkeys)]

    elif (isinstance(arg, str) and (not os.path.isfile(arg)) and '[' in arg
          and ']' in arg):
        colkeys = cnames

    # Try Channel, Counts or X,Y before defaulting to first two table cols
    elif 'CHANNEL' in cnames and 'COUNTS' in cnames:
        colkeys = ['CHANNEL', 'COUNTS']

    elif 'X' in cnames and 'Y' in cnames:
        colkeys = ['X', 'Y']

    else:
        colkeys = cnames[:ncols]

    cols = []
    for name in colkeys:
        for col in _require_tbl_col(tbl, name, cnames, make_copy, fix_type):
            cols.append(col)

    hdr = {}
    if hdrkeys is not None:
        for key in hdrkeys:
            hdr[key] = _require_hdr_key(tbl, key)

    if close_dataset:
        close_crate_dataset(tbl.get_dataset())
    return colkeys, cols, filename, hdr
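
The blockname work-around above relies on the _get_crate_by_blockname helper. A possible implementation is sketched below, assuming the pycrates dataset API (get_ncrates/get_crate, and that a crate exposes its block name via a name attribute); the real helper may differ:

def _get_crate_by_blockname(dataset, blockname):
    """Return the crate matching blockname, or None."""
    # Crates are numbered from 1, not 0.
    for idx in range(1, dataset.get_ncrates() + 1):
        crate = dataset.get_crate(idx)
        if crate.name.upper() == str(blockname).strip().upper():
            return crate

    return None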
Example #18
def set_image_data(filename, data, header, ascii=False, clobber=False,
                   packup=False):

    if not packup and not clobber and os.path.isfile(filename):
        raise IOErr("filefound", filename)

    if ascii:
        set_arrays(filename, [data['pixels'].ravel()],
                   ascii=ascii, clobber=clobber)
        return

    hdrlist = _create_header(header)

    # Write Image WCS Header Keys
    if data['eqpos'] is not None:
        cdeltw = data['eqpos'].cdelt
        crpixw = data['eqpos'].crpix
        crvalw = data['eqpos'].crval
        equin  = data['eqpos'].equinox

    if data['sky'] is not None:
        cdeltp = data['sky'].cdelt
        crpixp = data['sky'].crpix
        crvalp = data['sky'].crval

        _add_keyword(hdrlist, 'MTYPE1', 'sky     ')
        _add_keyword(hdrlist, 'MFORM1', 'x,y     ')
        _add_keyword(hdrlist, 'CTYPE1P', 'x      ')
        _add_keyword(hdrlist, 'CTYPE2P', 'y      ')
        _add_keyword(hdrlist, 'WCSNAMEP', 'PHYSICAL')
        _add_keyword(hdrlist, 'CDELT1P', cdeltp[0])
        _add_keyword(hdrlist, 'CDELT2P', cdeltp[1])
        _add_keyword(hdrlist, 'CRPIX1P', crpixp[0])
        _add_keyword(hdrlist, 'CRPIX2P', crpixp[1])
        _add_keyword(hdrlist, 'CRVAL1P', crvalp[0])
        _add_keyword(hdrlist, 'CRVAL2P', crvalp[1])

        if data['eqpos'] is not None:
            # Simply the inverse of the read transformations in
            # get_image_data
            cdeltw = cdeltw * cdeltp
            crpixw = (crpixw - crvalp) / cdeltp + crpixp

    if data['eqpos'] is not None:
        _add_keyword(hdrlist, 'MTYPE2', 'EQPOS   ')
        _add_keyword(hdrlist, 'MFORM2', 'RA,DEC  ')
        _add_keyword(hdrlist, 'CTYPE1', 'RA---TAN')
        _add_keyword(hdrlist, 'CTYPE2', 'DEC--TAN')
        _add_keyword(hdrlist, 'CDELT1', cdeltw[0])
        _add_keyword(hdrlist, 'CDELT2', cdeltw[1])
        _add_keyword(hdrlist, 'CRPIX1', crpixw[0])
        _add_keyword(hdrlist, 'CRPIX2', crpixw[1])
        _add_keyword(hdrlist, 'CRVAL1', crvalw[0])
        _add_keyword(hdrlist, 'CRVAL2', crvalw[1])
        _add_keyword(hdrlist, 'CUNIT1', 'deg     ')
        _add_keyword(hdrlist, 'CUNIT2', 'deg     ')
        _add_keyword(hdrlist, 'EQUINOX', equin)

    img = fits.PrimaryHDU(data['pixels'], header=fits.Header(hdrlist))
    if packup:
        return img
    # astropy renamed the clobber argument of writeto to overwrite
    img.writeto(filename, overwrite=True)
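
The write-side keywords are meant to be the exact inverse of the read-side calculation in get_image_data. A quick standalone check with made-up numbers:

import numpy

cdeltp = numpy.array([4.0, 4.0])    # logical-to-physical scale
crpixp = numpy.array([10.0, 10.0])
crvalp = numpy.array([20.0, 20.0])

# Physical-to-world terms, as stored after get_image_data has run.
cdeltw = numpy.array([0.5, 0.5])
crpixw = numpy.array([-4.0, -4.0])

# Write side (above): recover the FITS logical-to-world keywords.
cdelt_fits = cdeltw * cdeltp                      # -> [2. 2.]
crpix_fits = (crpixw - crvalp) / cdeltp + crpixp  # -> [4. 4.]

# Read side (get_image_data): applying its transforms to the FITS
# keywords must give back the physical-to-world terms.
assert numpy.allclose(cdelt_fits / cdeltp, cdeltw)
assert numpy.allclose(crvalp + (crpix_fits - crpixp) * cdeltp, crpixw)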
Example #19
def get_image_data(arg, make_copy=True, fix_type=True):
    """
    get_image_data ( filename [, make_copy=True, fix_type=True ])

    get_image_data ( IMAGECrate [, make_copy=True, fix_type=True ])
    """
    filename = ''
    close_dataset = False
    if isinstance(arg, str):
        img = open_crate(arg)

        if not isinstance(img, pycrates.IMAGECrate):
            close_crate_dataset(img.get_dataset())
            raise IOErr('badfile', arg, "IMAGECrate obj")

        filename = arg
        close_dataset = True

    elif isinstance(arg, pycrates.IMAGECrate):
        img = arg
        filename = arg.get_filename()
        make_copy = False

    else:
        raise IOErr('badfile', arg, "IMAGECrate obj")

    data = {}

    data['y'] = _require_image(img, make_copy, fix_type)

    # Ensure the WCS entries are always present, even when no
    # transform information is available.
    data['sky'] = None
    data['eqpos'] = None

    sky = None
    skynames = ['SKY', 'sky', 'pos', 'POS']
    names = img.get_axisnames()

    # find the SKY name using the set intersection
    inter = list(set(names) & set(skynames))
    if inter:
        sky = img.get_transform(inter[0])

    wcs = None
    if 'EQPOS' in names:
        wcs = img.get_transform('EQPOS')

    if sky is not None and transformstatus:
        linear = pycrates.WCSTANTransform()
        linear.set_name("LINEAR")
        linear.set_transform_matrix(sky.get_transform_matrix())
        cdelt = numpy.array(linear.get_parameter_value('CDELT'))
        crpix = numpy.array(linear.get_parameter_value('CRPIX'))
        crval = numpy.array(linear.get_parameter_value('CRVAL'))
        data['sky'] = WCS('physical', 'LINEAR', crval, crpix, cdelt)

    if wcs is not None and transformstatus:
        cdelt = numpy.array(wcs.get_parameter_value('CDELT'))
        crpix = numpy.array(wcs.get_parameter_value('CRPIX'))
        crval = numpy.array(wcs.get_parameter_value('CRVAL'))
        crota = SherpaFloat(wcs.get_parameter_value('CROTA'))
        equin = SherpaFloat(wcs.get_parameter_value('EQUINOX'))
        epoch = SherpaFloat(wcs.get_parameter_value('EPOCH'))
        data['eqpos'] = WCS('world', 'WCS', crval, crpix, cdelt, crota, epoch,
                            equin)

    data['header'] = _get_meta_data(img)

    keys = [
        'MTYPE1', 'MFORM1', 'CTYPE1P', 'CTYPE2P', 'WCSNAMEP', 'CDELT1P',
        'CDELT2P', 'CRPIX1P', 'CRPIX2P', 'CRVAL1P', 'CRVAL2P', 'MTYPE2',
        'MFORM2', 'CTYPE1', 'CTYPE2', 'CDELT1', 'CDELT2', 'CRPIX1', 'CRPIX2',
        'CRVAL1', 'CRVAL2', 'CUNIT1', 'CUNIT2', 'EQUINOX'
    ]

    for key in keys:
        try:
            data['header'].pop(key)
        except KeyError:
            pass

    if close_dataset:
        close_crate_dataset(img.get_dataset())
    return data, filename
Example #20
def _require_col(hdu, name, dtype=SherpaFloat, fix_type=False):
    col = _try_col(hdu, name, dtype, fix_type)
    if col is None:
        raise IOErr('reqcol', name, hdu._file.name)
    return col
Example #21
def get_rmf_data(arg, make_copy=True):
    """
    get_rmf_data( filename [, make_copy=True ])

    get_rmf_data( RMFCrate [, make_copy=True ])
    """
    filename = ''
    close_dataset = False
    if isinstance(arg, str):
        rmfdataset = open_crate_dataset(
            arg, pycrates.rmfcratedataset.RMFCrateDataset)


        if pycrates.is_rmf(rmfdataset) != 1:
            raise IOErr('badfile', arg, "RMFCrateDataset obj")

        filename = arg
        close_dataset = True

    elif pycrates.is_rmf(arg) == 1:
        rmfdataset = arg
        filename = arg.get_filename()
        make_copy = False

    else:
        raise IOErr('badfile', arg, "RMFCrateDataset obj")

    # Open the response matrix by extension name, and try using
    # some of the many, many ways people break the OGIP definition
    # of the extension name for the response matrix.
    rmf = _get_crate_by_blockname(rmfdataset, 'MATRIX')

    if rmf is None:
        rmf = _get_crate_by_blockname(rmfdataset, 'SPECRESP MATRIX')

    if rmf is None:
        rmf = _get_crate_by_blockname(rmfdataset, 'AXAF_RMF')

    if rmf is None:
        rmf = _get_crate_by_blockname(rmfdataset, 'RSP_MATRIX')

    if rmf is None:
        try:
            rmf = rmfdataset.get_crate(2)
        except IndexError:
            rmf = None

    if rmf is None or rmf.get_colnames() is None:
        raise IOErr('filenotfound', arg)

    data = {}

    if not rmf.column_exists('ENERG_LO'):
        raise IOErr('reqcol', 'ENERG_LO', filename)

    if not rmf.column_exists('ENERG_HI'):
        raise IOErr('reqcol', 'ENERG_HI', filename)

    # FIXME: this will be a problem now that we have
    # to pass the name of the matrix column

    if not rmf.column_exists('MATRIX'):
        raise IOErr('reqcol', 'MATRIX', filename)

    if not rmf.column_exists('N_GRP'):
        raise IOErr('reqcol', 'N_GRP', filename)

    if not rmf.column_exists('F_CHAN'):
        raise IOErr('reqcol', 'F_CHAN', filename)

    if not rmf.column_exists('N_CHAN'):
        raise IOErr('reqcol', 'N_CHAN', filename)

    data['detchans'] = _require_hdr_key(rmf, 'DETCHANS', SherpaInt)
    data['energ_lo'] = _require_col(rmf, 'ENERG_LO', make_copy, fix_type=True)
    data['energ_hi'] = _require_col(rmf, 'ENERG_HI', make_copy, fix_type=True)
    data['n_grp'] = _require_col(rmf,
                                 'N_GRP',
                                 make_copy,
                                 dtype=SherpaUInt,
                                 fix_type=True)

    f_chan = rmf.get_column('F_CHAN')
    offset = f_chan.get_tlmin()

    fcbuf = _require_col(rmf, 'F_CHAN', make_copy)
    ncbuf = _require_col(rmf, 'N_CHAN', make_copy)

    respbuf = _require_col_list(rmf, 'MATRIX', 1, make_copy)

    ebounds = _get_crate_by_blockname(rmfdataset, 'EBOUNDS')

    if ebounds is None:
        ebounds = rmfdataset.get_crate(3)

    data['header'] = _get_meta_data(rmf)
    data['header'].pop('DETCHANS')

    channel = None
    if ebounds is not None:
        data['e_min'] = _try_col(ebounds, 'E_MIN', make_copy, fix_type=True)
        data['e_max'] = _try_col(ebounds, 'E_MAX', make_copy, fix_type=True)
        if ebounds.column_exists('CHANNEL'):
            channel = ebounds.get_column('CHANNEL')

        # FIXME: do I include the header keywords from ebounds
        # data['header'].update(_get_meta_data(ebounds))

    # If F_CHAN has no TLMIN, the CHANNEL column of EBOUNDS may
    # provide the offset instead.
    if offset < 0 and channel is not None:
        offset = channel.get_tlmin()

    if offset < 0:
        error("Failed to locate TLMIN keyword for F_CHAN" +
              " column in RMF file '%s'; " % filename +
              'update the offset value in the RMF data set to' +
              ' the appropriate TLMIN value prior to fitting')

    # If the response is non-OGIP, tlmin is -(max of type), so resort
    # to the default in that case.
    if offset >= 0:
        data['offset'] = offset

    #
    # FIXME:
    #
    # Currently, CRATES does something screwy:  If n_grp is zero in a bin,
    # it appends a zero to f_chan, n_chan, and matrix.  I have no idea what
    # the logic behind this is -- why would you add data that you know you
    # don't need?  Although it's easy enough to filter the zeros out of
    # f_chan and n_chan, it's harder for matrix, since zero is a legitimate
    # value there.
    #
    # I think this crazy behavior of CRATES should be changed, but for the
    # moment we'll just punt in this case.  (If we don't, the calculation
    # in rmf_fold() will be trashed.)

    # CRATES does not support variable length arrays, so here we condense
    # the array of tuples into the proper length array

    chan_width = data['n_grp'].max()
    resp_width = 0
    if len(respbuf.shape) > 1:
        resp_width = respbuf.shape[1]

    (data['f_chan'], data['n_chan'],
     data['matrix']) = resp_init(data['n_grp'], fcbuf, ncbuf, chan_width,
                                 respbuf.ravel(), resp_width)

    if close_dataset:
        close_crate_dataset(rmfdataset)
    return data, filename
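
resp_init (from sherpa.astro.utils) condenses the fixed-width crates buffers into the flattened arrays described above. A rough pure-Python equivalent is sketched below for illustration only -- the real routine is compiled, and the buffer layout assumed here (1-D, row-major, padded to chan_width and resp_width entries per row, with resp_width > 0) is an assumption:

import numpy

def resp_init_sketch(n_grp, fcbuf, ncbuf, chan_width, respbuf, resp_width):
    """Flatten padded F_CHAN/N_CHAN/MATRIX buffers group by group."""
    f_chan, n_chan, matrix = [], [], []
    for i, ng in enumerate(n_grp):
        # Start of this energy row in the flattened response buffer.
        rpos = i * resp_width
        for g in range(int(ng)):
            nc = int(ncbuf[i * chan_width + g])
            f_chan.append(fcbuf[i * chan_width + g])
            n_chan.append(nc)
            matrix.extend(respbuf[rpos:rpos + nc])
            rpos += nc

    return (numpy.asarray(f_chan), numpy.asarray(n_chan),
            numpy.asarray(matrix))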
Example #22
def _require_vec(hdu, name, size=2, dtype=SherpaFloat, fix_type=False):
    col = _try_vec(hdu, name, size, dtype, fix_type)
    if numpy.equal(col, None).any():
        raise IOErr('reqcol', name, hdu._file.name)
    return col
Example #23
def get_table_data(arg, ncols=1, colkeys=None, make_copy=False,
                   fix_type=False, blockname=None, hdrkeys=None):
    """
    get_table_data( filename , ncols=1 [, colkeys=None [, make_copy=False [, blockname=None [, hdrkeys=None ]]]])

    get_table_data( [PrimaryHDU, BinTableHDU] , ncols=1 [, colkeys=None [, make_copy=False [, blockname=None [, hdrkeys=None ]]]])
    """
    filename = ''
    if isinstance(arg, str) and is_binary_file(arg):
        tbl = pyfits.open(arg)
        filename = arg
    elif (isinstance(arg, pyfits.HDUList) and len(arg) > 0 and
          isinstance(arg[0], pyfits.PrimaryHDU)):
        tbl = arg
        filename = tbl[0]._file.name
    else:
        raise IOErr('badfile', arg,
                    "a binary FITS table or a PyFITS.BinTableHDU list")

    try:
        # Use the first binary table extension we find.  Throw an exception
        # if there aren't any.
        for hdu in tbl:
            if blockname is None:
                if hdu.__class__ is pyfits.BinTableHDU:
                    break
                else:
                    continue
            elif (hdu.name.lower() == str(blockname).strip().lower() and
                  hdu.__class__ is pyfits.BinTableHDU):
                break

        else:
            raise IOErr('badext', filename)

        cnames = list(hdu.columns.names)

        if colkeys is not None:
            colkeys = [name.strip().upper() for name in list(colkeys)]
        # Try Channel, Counts or X,Y before defaulting to first two table cols
        elif ('CHANNEL' in cnames) and ('COUNTS' in cnames):
            colkeys = ['CHANNEL', 'COUNTS']
        elif ('X' in cnames) and ('Y' in cnames):
            colkeys = ['X', 'Y']
        else:
            colkeys = cnames[:ncols]

        cols = []
        for name in colkeys:
            for col in _require_tbl_col(hdu, name, fix_type=fix_type):
                cols.append(col)

        hdr = {}
        if hdrkeys is not None:
            for key in hdrkeys:
                hdr[key] = _require_key(hdu, key)

    finally:
        tbl.close()

    return colkeys, cols, filename, hdr
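
A typical call for the routine above (the file name is hypothetical):

colkeys, cols, filename, hdr = get_table_data('src.pha', ncols=2,
                                              hdrkeys=['EXPOSURE'])
# colkeys ends up as, e.g., ['CHANNEL', 'COUNTS']; cols holds the
# matching column arrays, and hdr maps each requested keyword to its
# value.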