Example #1
    def save(self):
        """ modified from suvi code by vhsu """
        pri_hdu = fits.PrimaryHDU(data=self.thmap)

        # Temporal Information
        date_fmt = '%Y-%m-%dT%H:%M:%S.%f'
        date_beg = self.start_time.strftime(date_fmt)
        date_end = self.end_time.strftime(date_fmt)
        date_now = datetime.utcnow().strftime(date_fmt)
        self.set_fits_header("TIMESYS", self.ref_hdr, pri_hdu)
        pri_hdu.header.append(
            ("DATE-BEG", date_beg, "sun observation start time on sat"))
        pri_hdu.header.append(
            ("DATE-END", date_end, "sun observation end time on sat"))
        pri_hdu.header.append(("DATE", date_now, "file generation time"))
        pri_hdu.header.append(
            ("EXPERT", self.config.expert, "person who labeled image"))
        pri_hdu.header.append(
            ("DATE-LAB", date_now, "date of labeling for the image"))

        # Instrument & Spacecraft State during Observation
        pri_hdu.header.append(
            ("EXPTIME", 1., "[s] effective imaging exposure time"))
        self.set_fits_header("YAW_FLIP", self.ref_hdr, pri_hdu)
        self.set_fits_header("ECLIPSE", self.ref_hdr, pri_hdu)

        # Pointing & Projection
        self.set_fits_header("WCSNAME", self.ref_hdr, pri_hdu)
        self.set_fits_header("CTYPE1", self.ref_hdr, pri_hdu)
        self.set_fits_header("CTYPE2", self.ref_hdr, pri_hdu)
        self.set_fits_header("CUNIT1", self.ref_hdr, pri_hdu)
        self.set_fits_header("CUNIT2", self.ref_hdr, pri_hdu)
        self.set_fits_header("PC1_1", self.ref_hdr, pri_hdu)
        self.set_fits_header("PC1_2", self.ref_hdr, pri_hdu)
        self.set_fits_header("PC2_1", self.ref_hdr, pri_hdu)
        self.set_fits_header("PC2_2", self.ref_hdr, pri_hdu)
        self.set_fits_header("CDELT1", self.ref_hdr, pri_hdu)
        self.set_fits_header("CDELT2", self.ref_hdr, pri_hdu)
        self.set_fits_header("CRVAL1", self.ref_hdr, pri_hdu)
        self.set_fits_header("CRVAL2", self.ref_hdr, pri_hdu)
        self.set_fits_header("CRPIX1", self.ref_hdr, pri_hdu)
        self.set_fits_header("CRPIX2", self.ref_hdr, pri_hdu)
        self.set_fits_header("DIAM_SUN", self.ref_hdr, pri_hdu)
        self.set_fits_header("LONPOLE", self.ref_hdr, pri_hdu)
        self.set_fits_header("CROTA", self.ref_hdr, pri_hdu)
        self.set_fits_header("SOLAR_B0", self.ref_hdr, pri_hdu)

        # File Provenance
        pri_hdu.header.append(
            ("TITLE", "Expert Labeled Thematic Map Image", "image title"))
        pri_hdu.header.append(
            ("MAP_MTHD", "human", "thematic map classifier method"))
        try:
            # Add COMMENT cards that divide the header into labeled sections
            divider = '------------------------------------------------------------------------'
            for offset, comment in enumerate(
                    [divider,
                     'USING SUVI THEMATIC MAP FILES',
                     divider,
                     'Map labels are described in the FITS extension.',
                     'Example:',
                     'from astropy.io import fits as pyfits',
                     'img = pyfits.open(<filename>)',
                     'map_labels = img[1].data',
                     divider,
                     'TEMPORAL INFORMATION',
                     divider], start=1):
                pri_hdu.header.insert(
                    pri_hdu.header.index("TITLE") + offset,
                    ("COMMENT", comment))
            for anchor, section in [
                    ("DATE", 'INSTRUMENT & SPACECRAFT STATE DURING OBSERVATION'),
                    ("ECLIPSE", 'POINTING & PROJECTION'),
                    ("SOLAR_B0", 'FILE PROVENANCE')]:
                for offset, comment in enumerate([divider, section, divider],
                                                 start=1):
                    pri_hdu.header.insert(
                        pri_hdu.header.index(anchor) + offset,
                        ("COMMENT", comment))
        except Exception:
            # A degraded map may be missing the anchor keywords used above.
            print(
                "This thematic map may be degraded and missing many keywords.")

        # Thematic map feature list (Secondary HDU extension)
        map_val = []
        map_label = []
        for key, value in self.config.solar_class_index.items():
            map_label.append(key)
            map_val.append(value)
        c1 = fits.Column(name="Thematic Map Value",
                         format="B",
                         array=np.array(map_val))
        c2 = fits.Column(name="Feature Name",
                         format="22A",
                         array=np.array(map_label))
        bintbl_hdr = fits.Header([("XTENSION", "BINTABLE")])
        sec_hdu = fits.BinTableHDU.from_columns([c1, c2], header=bintbl_hdr)

        # Output thematic map as the primary HDU and the list of map features as an extension BinTable HDU
        hdu = fits.HDUList([pri_hdu, sec_hdu])
        hdu.writeto(self.filename, overwrite=True, checksum=True)
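# A minimal read-back sketch (an addition; assumes a file written by save()
# above, here given the hypothetical name "thmap.fits") following the recipe
# embedded in the COMMENT cards:
from astropy.io import fits as pyfits

img = pyfits.open("thmap.fits")
thematic_map = img[0].data  # the labeled image in the primary HDU
map_labels = img[1].data    # (value, feature name) rows in the BinTable extension
img.close()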
Example #2
    def _from_data(cls, *, version=None, fits_keywords=None, **columns):

        # avoid a mutable default argument
        if fits_keywords is None:
            fits_keywords = {}

        if not hasattr(cls, '_OI_VER'):
            if version is None:
                version = getattr(cls, '_OI_VER', 2)
            cls = cls.get_class(version=version)

        # FITS keywords and column names are upper case
        if isinstance(fits_keywords, fits.Header):
            fits_keywords = {c[0]: c[1:] for c in fits_keywords.cards}

        fits_keywords = {
            k.upper(): v
            for k, v in fits_keywords.items() if v is not None
        }
        columns = {k.upper(): v for k, v in columns.items()}

        # prefix non-standard columns
        oi_colnames = cls._get_oi_columns(required=False)['name']
        columns = {
            n if n in oi_colnames else f"NS_{n}": v
            for n, v in columns.items()
        }

        # Header
        header = fits.Header()
        for card in cls._CARDS:
            name = card['name']
            comment = card['comment']
            if name in fits_keywords:
                value = fits_keywords[name]
                if isinstance(value, (list, tuple)):
                    value = value[0]
                header.set(name, value, comment)
                del fits_keywords[name]
            elif card['required']:
                if card['default'] is not None:
                    header.set(name, card['default'], comment)
        for name, value in fits_keywords.items():
            if not isinstance(value, (tuple, list)):
                value = [value]
            header.set(name, *value)

        # Guess shape
        shape = cls._guess_shape(columns)
        nrows = shape[0]

        def full(s, x):
            if np.ndim(x) > len(s):
                return np.asarray(x)
            return np.full(s, x)

        # Guess errors from data
        obs_names = cls.get_observable_names()
        err_names = cls.get_error_names()
        for obs, err in zip(obs_names, err_names):
            if obs in columns:
                if err not in columns:
                    columns[err] = 0.
        fcols = []

        # official OIFITS columns
        for col in cls._get_oi_columns(required=False):
            name = col['name']
            if name not in columns:
                if col['required']:
                    columns[name] = col['default']
                else:
                    continue
            if name in [*obs_names, *err_names, 'FLAG']:
                new_shape = shape
            else:
                new_shape = (nrows, )
            array = full(new_shape, columns[name])
            del columns[name]
            fcol = fu.ascolumn(array,
                               format=col['format'],
                               unit=col['unit'],
                               name=name)
            fcols.append(fcol)

        # additional columns
        for name, array in columns.items():
            fcol = fu.ascolumn(array, name=name)
            fcols.append(fcol)

        tab = super().from_columns(fcols, header=header)
        return tab
Example #3
"""
Goal:
    For each core in the core photometry catalog, determine the corresponding
    brightness in the above images, then put the core CDF on top of the
    brightness ("column") PDF.
"""

alma_hdr = fits.Header(
    dict(
        NAXIS=2,
        NAXIS1=450,
        NAXIS2=450,
        CTYPE1='RA---SIN',
        CRVAL1=2.668301750000E+02,
        CDELT1=-0.0002777777777777778,
        CRPIX1=225.0,
        CUNIT1='deg     ',
        CTYPE2='DEC--SIN',
        CRVAL2=-2.839256111111E+01,
        CDELT2=0.0002777777777777778,
        CRPIX2=225.0,
        CUNIT2='deg     ',
    ))
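# A small WCS sketch (an addition; not in the original snippet): the header
# above is a complete celestial WCS, so it can drive coordinate transforms.
from astropy.wcs import WCS

alma_wcs = WCS(alma_hdr)
# world coordinates of the reference pixel (FITS 1-based origin)
ref_ra, ref_dec = alma_wcs.wcs_pix2world(225.0, 225.0, 1)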

tbl = table.Table.read(
    paths.tpath("continuum_photometry_withSIMBAD.ipac"),
    format='ascii.ipac',
)

regfile = pyregion.open(paths.rpath('coverage.reg'))
Example #4
def resamp_func(
    d_file,
    z_set,
    ra_set,
    dec_set,
    img_x,
    img_y,
    band,
    out_file,
    z_ref,
    stack_info=None,
    pixel=0.396,
    id_dimm=False,
):
    """
	d_file : path where save the masked data (include file-name structure:'/xxx/xxx/xxx.xxx')
	z_set, ra_set, dec_set : ra, dec, z of will be resampled imgs
	band : the band of imgs, 'str' type
	out_file : path where to save the resampling img
	pixel : pixel scale, in unit 'arcsec' (default is 0.396)
	z_ref : reference redshift, the redshift to which all clusters will be scaled
	id_dimm : if do cosmic dimming correction or not
	img_x, img_y : BCG location on image frame before pixel resampling
	"""
    zn = len(z_set)
    bcg_x, bcg_y = [], []

    for k in range(zn):

        ra_g = ra_set[k]
        dec_g = dec_set[k]
        z_g = z_set[k]

        file = d_file % (band, ra_g, dec_g, z_g)
        data = fits.getdata(file, header=True)

        img = data[0]
        cx0 = data[1]['CRPIX1']
        cy0 = data[1]['CRPIX2']
        RA0 = data[1]['CRVAL1']
        DEC0 = data[1]['CRVAL2']

        #. convert (ra, dec) to location in image frame
        #wcs = awc.WCS(data[1])
        #cx, cy = wcs.all_world2pix(ra_g * U.deg, dec_g * U.deg, 1)

        #. read BCG position from catalog
        cx, cy = img_x[k], img_y[k]

        Da_g = Test_model.angular_diameter_distance(z_g).value
        Da_ref = Test_model.angular_diameter_distance(z_ref).value

        Dl_g = Test_model.luminosity_distance(z_g).value
        Dl_ref = Test_model.luminosity_distance(z_ref).value

        #. observation angle and flux factor at z_ref
        pixel_ref = pixel * (Da_g / Da_ref)
        eta_flux = Dl_g**2 / Dl_ref**2  #... flux change due to distance

        eta_pix = pixel / pixel_ref

        if id_dimm:

            dimm_flux = flux_recal(img, z_g, z_ref)
            pre_img = dimm_flux * eta_flux

        else:
            pre_img = img * 1.

        ix0 = int(cx0 / eta_pix)
        iy0 = int(cy0 / eta_pix)

        if eta_pix > 1:
            resam, xn, yn = sum_samp(eta_pix, eta_pix, pre_img, cx, cy)
        else:
            resam, xn, yn = down_samp(eta_pix, eta_pix, pre_img, cx, cy)

        # change the data type
        out_data = resam.astype('float32')

        bcg_x.append(xn)
        bcg_y.append(yn)

        x0 = resam.shape[1]
        y0 = resam.shape[0]

        keys = [
            'SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'CRPIX1',
            'CRPIX2', 'CENTER_X', 'CENTER_Y', 'CRVAL1', 'CRVAL2', 'BCG_RA',
            'BCG_DEC', 'ORIGN_Z', 'P_SCALE'
        ]
        value = [
            'T', 32, 2, x0, y0, ix0, iy0, xn, yn, RA0, DEC0, ra_g, dec_g, z_g,
            pixel
        ]
        ff = dict(zip(keys, value))
        fil = fits.Header(ff)
        fits.writeto(out_file % (band, ra_g, dec_g, z_g),
                     out_data,
                     header=fil,
                     overwrite=True)

    bcg_x = np.array(bcg_x)
    bcg_y = np.array(bcg_y)

    if stack_info is not None:
        keys = ['ra', 'dec', 'z', 'bcg_x', 'bcg_y']
        values = [ra_set, dec_set, z_set, bcg_x, bcg_y]
        fill = dict(zip(keys, values))
        data = pds.DataFrame(fill)
        data.to_csv(stack_info)

    return
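# A hedged call sketch (all paths and values below are hypothetical; the
# '%s'/'%.3f' slots must match the '%' formatting used inside resamp_func):
import numpy as np

z_set = np.array([0.22, 0.31])
ra_set = np.array([266.830, 266.901])
dec_set = np.array([-28.392, -28.410])
img_x = np.array([1024.0, 1000.5])  # BCG pixel positions from a catalog
img_y = np.array([744.0, 760.2])
d_file = '/data/mask/mask-%s-ra%.3f-dec%.3f-z%.3f.fits'
out_file = '/data/resamp/resamp-%s-ra%.3f-dec%.3f-z%.3f.fits'
resamp_func(d_file, z_set, ra_set, dec_set, img_x, img_y, 'r', out_file,
            z_ref=0.25, stack_info='/data/resamp/stack_cat.csv', id_dimm=True)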
Example #5
def dict_to_hdu(d, name=None, hdr=None, force_to_bintbl=False):
    """
    Write a dictionary to a fits HDU.

    Elements in the dictionary that are integers, floats, or strings
    (specific numpy types or otherwise) are written to the HDU
    header. The header keywords are identical to the dictionary keys.

    If any of the elements in the dictionary are an
    `astropy.table.Table`_, that dictionary can *only* contain that
    table and single values that will be written to the extension
    header. That is, there can be only one `astropy.table.Table`_
    element, and none of the elements can be a :obj:`list` or
    `numpy.ndarray`_. By default the extension name is the dictionary
    key for the `astropy.table.Table`_ item; this can be overridden
    using the ``name`` argument.

    Elements in the dictionary that are a list or a `numpy.ndarray`_
    are written as either an image (if there is only one array and a
    binary table is not specifically requested using
    ``force_to_bintbl``) or a series of table columns. The lists are
    assumed to be interpretable as the ``array`` argument of
    `astropy.io.fits.Column`_ (for a table) or the ``data`` argument
    of `astropy.io.fits.ImageHDU`_ (for an image).

        - If an image is to be written, the extension name, by
          default, is the dictionary key for the array item; this can
          be overridden using the ``name`` argument.

        - If a table is to be written, the method checks that the
          relevant arrays have a consistent number of rows. If they
          do not, the format and dimensions of the table written are
          set so that the arrays are contained in a single row. The
          column names in the table are identical to the dictionary
          keywords. In this case, ``name`` must be provided if you
          want the extension to have a name; there is no default
          name.

    Args:
        d (:obj:`dict`):
            Dictionary with data to write to the
            `astropy.io.fits.BinTableHDU`_.
        name (:obj:`str`, optional):
            Name to give the HDU extension. If None and the input is
            a dictionary with a single array or
            `astropy.table.Table`_ to write, the name of the
            extension is the relevant dictionary keyword. Any
            provided value for ``name`` will override this behavior.
            If the provided dictionary is used to construct a table,
            where the dictionary keys are used for the table column
            names, there is no default name for the extension (i.e.,
            no extension name is used if ``name is None``).
        hdr (`astropy.io.fits.Header`_, optional):
            Base-level header to include in the HDU. If None, an
            empty header is used and then added to.
        force_to_bintbl (:obj:`bool`, optional):
            Force a BinTableHDU to be constructed instead of an
            ImageHDU when either there are no arrays or tables to
            write or only a single array is provided.

    Returns:
        `astropy.io.fits.ImageHDU`_, `astropy.io.fits.BinTableHDU`_:
        HDU with the data. An `astropy.io.fits.ImageHDU`_ object is
        returned if there is 1 (or fewer) array-like objects in the
        dictionary. Otherwise, an `astropy.io.fits.BinTableHDU`_
        object is returned with the data.

    Raises:
        TypeError:
            Raised if the input object is not a dictionary or the
            method cannot interpret how to use an element of the
            dictionary.
        ValueError:
            Raised if dictionary contains another dictionary, more
            than one `astropy.table.Table`_ object, or both an
            `astropy.table.Table`_ and an array-like object
            (:obj:`list` or `numpy.ndarray`_).
    """
    # Check the input is a dictionary (not very pythonic...)
    if not isinstance(d, dict):
        raise TypeError('Input must be a dictionary.')
    # Check the dictionary contents
    ndict = numpy.sum([isinstance(d[key], dict) for key in d.keys()])
    if ndict > 0:
        raise ValueError('Cannot write nested dictionaries.')
    ntab = numpy.sum([isinstance(d[key], Table) for key in d.keys()])
    if ntab > 1:
        raise ValueError(
            'Cannot write dictionaries with more than one astropy.table.Table.'
        )
    narr = numpy.sum(
        [isinstance(d[key], (list, numpy.ndarray)) for key in d.keys()])
    if ntab > 0 and narr > 0:
        raise ValueError(
            'Cannot write dictionaries with both arrays and Tables.')

    # Write any header data and find arrays and Tables
    _hdr = fits.Header() if hdr is None else hdr.copy()
    array_keys = []
    table_keys = []
    for key in d.keys():
        if d[key] is None:
            continue
        # TODO: may be better to do this
        #   isinstance(d[key], (collections.Sequence, numpy.ndarray)):
        # This ignores the defined otype...
        if isinstance(d[key], (list, numpy.ndarray)):
            array_keys += [key]
        elif isinstance(d[key], Table):
            table_keys += [key]
        elif isinstance(d[key],
                        (int, numpy.integer, float, numpy.floating, str)):
            _hdr[key.upper()] = d[key]
        else:
            raise TypeError(
                'Do not know how to write object with type {0}'.format(
                    type(d[key])))

    # If there aren't any arrays or tables, return an empty ImageHDU or
    # BinTableHDU with just the header data.
    if len(array_keys) < 1 and len(table_keys) < 1:
        return fits.BinTableHDU(header=_hdr, name=name) if force_to_bintbl \
                    else fits.ImageHDU(header=_hdr, name=name)

    # If there's only a single array, return it in an ImageHDU or, if
    # requested, a BinTableHDU
    if len(array_keys) == 1 and not force_to_bintbl:
        return fits.ImageHDU(data=d[array_keys[0]],
                             header=_hdr,
                             name=array_keys[0] if name is None else name)

    # If there's only a single Table, return it in a BinTableHDU
    if len(table_keys) == 1:
        # TODO: If we pass hdr directly, does this call include any
        # table meta?
        return fits.BinTableHDU(data=d[table_keys[0]],
                                header=_hdr,
                                name=table_keys[0] if name is None else name)

    # Only remaining option is to build a BinTableHDU based on the
    # dictionary contents.

    # Do all arrays have the same number of rows?
    single_row = len(
        numpy.unique([numpy.asarray(d[key]).shape[0]
                      for key in array_keys])) > 1

    # If the number of rows is inconsistent, save the data in a single
    # row. Otherwise, save the data as a multi-row table.
    cols = []
    for key in array_keys:
        cols += [
            fits.Column(name=key,
                        format=rec_to_fits_type(numpy.asarray(d[key]),
                                                single_row=single_row),
                        dim=rec_to_fits_col_dim(d[key], single_row=single_row),
                        array=numpy.expand_dims(d[key], 0)
                        if single_row else numpy.asarray(d[key]))
        ]
    return fits.BinTableHDU.from_columns(cols, header=_hdr, name=name)
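# A minimal usage sketch (key names and values are illustrative): scalars
# land in the extension header, while multiple arrays force a binary table.
import numpy

d = {'EXPTIME': 30.0,
     'flux': numpy.arange(10, dtype=float),
     'ivar': numpy.ones(10, dtype=float)}
hdu = dict_to_hdu(d, name='SPEC')  # BinTableHDU with 'flux' and 'ivar' columns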
Example #6
def _ffi_todo(input_folder, sector, camera, ccd):

    logger = logging.getLogger(__name__)

    cat_tmp = []

    # See if there are any FFIs for this camera and ccd.
    # We just check if an HDF5 file exist.
    hdf5_file = find_hdf5_files(input_folder,
                                sector=sector,
                                camera=camera,
                                ccd=ccd)
    if len(hdf5_file) != 1:
        raise FileNotFoundError("Could not find HDF5 file")

    # Load the relevant information from the HDF5 file for this camera and ccd:
    with h5py.File(hdf5_file[0], 'r') as hdf:
        if isinstance(hdf['wcs'], h5py.Group):
            refindx = hdf['wcs'].attrs['ref_frame']
            hdr_string = hdf['wcs']['%04d' % refindx][0]
        else:
            hdr_string = hdf['wcs'][0]
        if not isinstance(hdr_string, str):
            hdr_string = hdr_string.decode("utf-8")  # For Python 3
        wcs = WCS(header=fits.Header.fromstring(hdr_string))
        offset_rows = hdf['images'].attrs.get('PIXEL_OFFSET_ROW', 0)
        offset_cols = hdf['images'].attrs.get('PIXEL_OFFSET_COLUMN', 0)
        image_shape = hdf['images']['0000'].shape

    # Load the corresponding catalog:
    catalog_file = find_catalog_files(input_folder,
                                      sector=sector,
                                      camera=camera,
                                      ccd=ccd)
    if len(catalog_file) != 1:
        raise FileNotFoundError(
            "Catalog file not found: SECTOR=%s, CAMERA=%s, CCD=%s" %
            (sector, camera, ccd))

    with contextlib.closing(sqlite3.connect(catalog_file[0])) as conn:
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()

        # Load the settings:
        cursor.execute(
            "SELECT * FROM settings WHERE camera=? AND ccd=? LIMIT 1;",
            (camera, ccd))
        settings = cursor.fetchone()

        # Find all the stars in the catalog brighter than a certain limit:
        cursor.execute(
            "SELECT starid,tmag,ra,decl FROM catalog WHERE tmag < 15 ORDER BY tmag;"
        )
        for row in cursor.fetchall():
            logger.debug("%011d - %.3f", row['starid'], row['tmag'])

            # Calculate the position of this star on the CCD using the WCS:
            ra_dec = np.atleast_2d([row['ra'], row['decl']])
            x, y = wcs.all_world2pix(ra_dec, 0)[0]

            # Subtract the pixel offset if there is one:
            x -= offset_cols
            y -= offset_rows

            # If the target falls outside silicon, do not add it to the todo list:
            # The reason for the strange 0.5's is that pixel centers are at integers.
            if (x < -0.5 or y < -0.5 or x > image_shape[1] - 0.5
                    or y > image_shape[0] - 0.5):
                continue

            # Calculate distance from target to edge of image:
            EdgeDist = edge_distance(y, x, image_shape=image_shape)

            # Calculate the Cotrending Basis Vector area the star falls in:
            cbv_area = calc_cbv_area(row, settings)

            # The target is on silicon, so add it to the todo list:
            cat_tmp.append({
                'starid': row['starid'],
                'sector': sector,
                'camera': camera,
                'ccd': ccd,
                'datasource': 'ffi',
                'tmag': row['tmag'],
                'cbv_area': cbv_area,
                'edge_dist': EdgeDist
            })

        cursor.close()

    # Create the TODO list as a table which we will fill with targets:
    return Table(rows=cat_tmp,
                 names=('starid', 'sector', 'camera', 'ccd', 'datasource',
                        'tmag', 'cbv_area', 'edge_dist'),
                 dtype=('int64', 'int32', 'int32', 'int32', 'S256', 'float32',
                        'int32', 'float32'))
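# A self-contained sketch of the WCS round trip used above (keyword values
# are arbitrary): a Header serialized with tostring() is restored with
# fromstring() and can convert sky coordinates back to pixels.
from astropy.io import fits
from astropy.wcs import WCS

hdr = fits.Header()
hdr['CTYPE1'], hdr['CTYPE2'] = 'RA---TAN', 'DEC--TAN'
hdr['CRVAL1'], hdr['CRVAL2'] = 266.83, -28.39
hdr['CRPIX1'], hdr['CRPIX2'] = 1024.5, 1024.5
hdr['CDELT1'], hdr['CDELT2'] = -0.005833, 0.005833
hdr_string = hdr.tostring()  # the serialized form stored in the HDF5 file
wcs = WCS(header=fits.Header.fromstring(hdr_string))
x, y = wcs.all_world2pix([[266.83, -28.39]], 0)[0]  # back to pixel coordinates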
Example #7
    def save(self,
             outfile=None,
             overwrite=True,
             traceImage=None,
             tslits_dict=None):
        """
        Save the main TraceSlits data as a MasterFrame.

        TODO: Change the format of this...

        Args:
            outfile (:obj:`str`, optional):
                Name for the output file.  Defaults to
                :attr:`file_path`.
            overwrite (:obj:`bool`, optional):
                Overwrite any existing file.
            traceImage (`numpy.ndarray`_, :class:`pypeit.traceimage.TraceImage`, optional):
                An array with only the image data or the full
                :class:`pypeit.traceimage.TraceImage` instance with the
                data used to construct the slit traces.
            tslits_dict (:obj:`dict`, optional):
                The trace-slits dictionary to write. Defaults to
                :attr:`tslits_dict`.
        """
        _outfile = self.file_path if outfile is None else outfile
        # Check if it exists
        if os.path.exists(_outfile) and not overwrite:
            msgs.warn('Master file exists: {0}'.format(_outfile) +
                      msgs.newline() + 'Set overwrite=True to overwrite it.')
            return

        _tslits_dict = self.tslits_dict if tslits_dict is None else tslits_dict

        # Log
        msgs.info('Saving master frame to {0}'.format(_outfile))

        # Build the header
        hdr = fits.Header()
        #   - Set the master frame type
        hdr['FRAMETYP'] = (self.master_type,
                           'PypeIt: Master calibration frame type')
        #   - List the completed steps
        hdr['STEPS'] = (','.join(self.steps), 'Completed reduction steps')
        #   - Provide the file names
        if traceImage is not None:
            try:
                nfiles = len(traceImage.files)
                ndig = int(np.log10(nfiles)) + 1
                for i in range(nfiles):
                    # zero-pad the file number, not the whole keyword
                    hdr['F{0}'.format(str(i+1).zfill(ndig))] \
                            = (traceImage.files[i], 'PypeIt: Processed raw file')
            except Exception:
                msgs.warn(
                    'Master trace frame does not include list of source files.'
                )
        #   - Slit metadata
        # TODO: Provide header comments
        hdr['DET'] = self.det
        hdr['NSPEC'] = _tslits_dict['nspec']
        hdr['NSPAT'] = _tslits_dict['nspat']
        hdr['NSLITS'] = _tslits_dict['nslits']
        hdr['PAD'] = _tslits_dict['pad']
        hdr['BINSPEC'] = _tslits_dict['binspectral']
        hdr['BINSPAT'] = _tslits_dict['binspatial']
        hdr['SPECTROG'] = _tslits_dict['spectrograph']

        # Collect data that may be None.  If they are None, no data will
        # be in the relevant extensions.
        mstrace = self.mstrace
        if traceImage is not None:
            try:
                mstrace = traceImage.stack
            except Exception:
                # Assume it failed because it's not a TraceImage object
                # and it's a numpy.ndarray to write.
                mstrace = traceImage
        left_orig = None if 'slit_left_orig' not in _tslits_dict.keys() \
                        else _tslits_dict['slit_left_orig']
        righ_orig = None if 'slit_righ_orig' not in _tslits_dict.keys() \
                        else _tslits_dict['slit_righ_orig']

        # Write the file
        fits.HDUList([
            fits.PrimaryHDU(header=hdr),
            fits.ImageHDU(data=mstrace, name='TRACEIMG'),
            # TODO: These should be written to a BinaryTable
            fits.ImageHDU(data=_tslits_dict['slit_left'], name='SLIT_LEFT'),
            fits.ImageHDU(data=_tslits_dict['slit_righ'], name='SLIT_RIGH'),
            fits.ImageHDU(data=_tslits_dict['slitcen'], name='SLITCEN'),
            fits.ImageHDU(data=_tslits_dict['spec_min'], name='SPEC_MIN'),
            fits.ImageHDU(data=_tslits_dict['spec_max'], name='SPEC_MAX'),
            fits.ImageHDU(data=left_orig, name='SLIT_LEFT_ORIG'),
            fits.ImageHDU(data=righ_orig, name='SLIT_RIGH_ORIG'),
            fits.ImageHDU(data=_tslits_dict['maskslits'].astype(int),
                          name='MASK'),  # int deals with bool
        ]).writeto(_outfile, overwrite=True)
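# A read-back sketch (the filename is hypothetical): the extensions written
# above can be fetched by EXTNAME, and the slit metadata from the header.
from astropy.io import fits

with fits.open('MasterTrace_A_1_01.fits') as hdul:
    mstrace = hdul['TRACEIMG'].data
    slit_left = hdul['SLIT_LEFT'].data
    nslits = hdul[0].header['NSLITS']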
Example #8
# Load map
DL07_paras, hp_header = hp.read_map(
    map_name,
    # field indicates which column you choose to load, starting from 0.
    field=0,
    h=True,
    nest=None,
)
e_DL07_paras, e_hp_header = hp.read_map(
    map_name,
    # field indicates which column you choose to load, starting from 0.
    field=1,
    h=True,
    nest=None,
)
hp_hdu = fits.ImageHDU(DL07_paras, fits.Header(hp_header))
hp_hdu.header['UNIT'] = r"$M_{sun}/kpc^2$"
e_hp_hdu = fits.ImageHDU(e_DL07_paras, fits.Header(e_hp_header))
e_hp_hdu.header['UNIT'] = r"$M_{sun}/kpc^2$"
# Show the header of this map.
hdul = fits.open(map_name)
hdul.info()
hdr = hdul[1].header
print("### HEADER for this map ###")
print(repr(hdr))
print("### END of HEADER ###")
#--------------------------------------------------
# Initialize the target coordinate
cloud_skycoord_list = []
cloud_ra_list = []
cloud_dec_list = []
Example #9
        'psi': psi,
        'r': r,
        'x': x,
        'y': y,
        'intensity': intensity,
        'mcAlttel': mcAlttel,
        'mcAztel': mcAztel
    }
    ntuple = Table(output)

    # If the destination FITS file doesn't exist, create a new one with proper headers
    if not os.path.isfile(outfile):
        if filetype == 'fits':
            # Convert the data Table into a BinTableHDU to write it into a FITS file
            pardata = ntuple.as_array()
            parheader = fits.Header()
            parheader.update(ntuple.meta)

            if storeimg:
                pixels = fits.ImageHDU(fitsdata)  # Image with pixel content

            # Write the data in an HDUList for storing in a FITS file
            hdr = fits.Header()  # Example header; we can add more things to it
            hdr['TEL'] = 'LST1'
            primary_hdu = fits.PrimaryHDU(header=hdr)
            hdul = fits.HDUList([primary_hdu])
            hdul.append(fits.BinTableHDU(data=pardata, header=parheader))
            if storeimg:
                hdul.append(pixels)
            hdul.writeto(outfile)
Example #10
def resamp_sky(band_id, sub_z, sub_ra, sub_dec):

    ii = int(band_id)
    zn = len(sub_z)
    for k in range(zn):
        ra_g = sub_ra[k]
        dec_g = sub_dec[k]
        z_g = sub_z[k]
        Da_g = Test_model.angular_diameter_distance(z_g).value
        try:
            data = fits.open(
                load +
                'random_cat/sky_img/rand_sky-ra%.3f-dec%.3f-z%.3f-%s-band.fits'
                % (ra_g, dec_g, z_g, band[ii]))
            img = data[0].data
            head = data[0].header
            cx0 = data[0].header['CRPIX1']
            cy0 = data[0].header['CRPIX2']
            RA0 = data[0].header['CRVAL1']
            DEC0 = data[0].header['CRVAL2']
            wcs = awc.WCS(head)
            cx, cy = wcs.all_world2pix(ra_g * U.deg, dec_g * U.deg, 1)

            Angur = rad2asec / Da_g
            Rp = Angur / pixel
            L_ref = Da_ref * pixel / rad2asec
            L_z0 = Da_g * pixel / rad2asec
            b = L_ref / L_z0

            ix0 = int(cx0 / b)
            iy0 = int(cy0 / b)

            if b > 1:
                resam, xn, yn = sum_samp(b, b, img, cx, cy)
            else:
                resam, xn, yn = down_samp(b, b, img, cx, cy)

            xn = int(xn)
            yn = int(yn)
            x0 = resam.shape[1]
            y0 = resam.shape[0]

            keys = [
                'SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'CRPIX1',
                'CRPIX2', 'CENTER_X', 'CENTER_Y', 'CRVAL1', 'CRVAL2',
                'CENTER_RA', 'CENTER_DEC', 'ORIGN_Z', 'P_SCALE'
            ]
            value = [
                'T', 32, 2, x0, y0, ix0, iy0, xn, yn, RA0, DEC0, ra_g, dec_g,
                z_g, pixel
            ]
            ff = dict(zip(keys, value))
            fil = fits.Header(ff)
            fits.writeto(
                load +
                'random_cat/sky_resamp_img/rand_resamp-sky-%s-ra%.3f-dec%.3f-redshift%.3f.fits'
                % (band[ii], ra_g, dec_g, z_g),
                resam,
                header=fil,
                overwrite=True)
        except FileNotFoundError:
            continue
    return
Example #11
def fits_header():
    hdr = fits.Header()
    hdr[KEY] = (VALUE, COMMENT)
    hdr[ADDITIVE_KEY] = ADDITIVE_ENTRY
    return hdr
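# This fixture assumes module-level constants; a hypothetical set that makes
# it runnable on its own:
KEY, VALUE, COMMENT = 'OBSERVER', 'E. Hubble', 'person who took the data'
ADDITIVE_KEY, ADDITIVE_ENTRY = 'EXPTIME', 30.0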
Example #12
    def createNpolHdr(cls, sciheader, npolfile, wdvarr_ver, npl_extname, ccdchip):
        """
        Creates a header for the WCSDVARR extension based on the NPOL reference file
        and sci extension header. The goal is to always work in image coordinates
        (also for subarrays and binned images. The WCS for the WCSDVARR extension
        i ssuch that a full size npol table is created and then shifted or scaled
        if the science image is a subarray or binned image.
        """
        npl = fits.open(npolfile)
        npol_phdr = npl[0].header
        for ext in npl:
            try:
                nplextname = ext.header['EXTNAME']
                nplextver = ext.header['EXTVER']
            except KeyError:
                continue

            nplccdchip = cls.get_ccdchip(npl, extname=nplextname, extver=nplextver)
            if nplextname == npl_extname and nplccdchip == ccdchip:
                npol_header = ext.header
                break

        # read NAXIS before closing the HDUList
        naxis = npl[1].header['NAXIS']
        npl.close()

        ccdchip = nplextname  # npol_header['CCDCHIP']

        cdl = [
            ('XTENSION', 'IMAGE', 'Image extension'),
            ('BITPIX', -32, 'number of bits per data pixel'),
            ('NAXIS', naxis, 'Number of data axes'),
            ('EXTNAME', 'WCSDVARR', 'WCS distortion array'),
            ('EXTVER', wdvarr_ver, 'Distortion array version number'),
            ('PCOUNT', 0, 'number of parameters'),
            ('GCOUNT', 1, 'number of groups'),
            ('CCDCHIP', ccdchip),
        ]

        for i in range(1, naxis + 1):
            cdl.append((f'NAXIS{i:d}', npol_header.get(f'NAXIS{i:d}'),
                        f"length of data axis {i:d}"))
            cdl.append((f'CDELT{i:d}', npol_header.get(f'CDELT{i:d}', 1.0) *
                        sciheader.get(f'LTM{i:d}_{i:d}', 1),
                        "Coordinate increment at reference point"))
            cdl.append((f'CRPIX{i:d}', npol_header.get(f'CRPIX{i:d}', 0.0),
                        "Pixel coordinate of reference point"))
            cdl.append((f'CRVAL{i:d}', npol_header.get(f'CRVAL{i:d}', 0.0) -
                        sciheader.get(f'LTV{i:d}', 0),
                        "Coordinate value at reference point"))

        # Now add keywords from NPOLFILE header to document source of calibration
        # include all keywords after and including 'FILENAME' from header
        start_indx = -1
        end_indx = 0
        for i, c in enumerate(npol_phdr):
            if c == 'FILENAME':
                start_indx = i
            if c == '':  # remove blanks from end of header
                end_indx = i + 1
                break
        if start_indx >= 0:
            for card in npol_phdr.cards[start_indx: end_indx]:
                cdl.append(card)

        hdr = fits.Header(cards=cdl)

        return hdr
Example #13
def combine_bias(config, logtable):
    """Combine the bias images.

    Args:
        config (:class:`configparser.ConfigParser`): Config object.
        logtable (:class:`astropy.table.Table`): Table of Observing log.

    Returns:
        tuple: A tuple containing:

            * **bias** (:class:`numpy.ndarray`) – Output bias image.
            * **bias_card_lst** (list) – List of FITS header cards related to
              the bias correction.

    """

    rawpath      = config['data']['rawpath']
    readout_mode = config['data']['readout_mode']

    # determine number of cores to be used
    ncores = config['reduce'].get('ncores')
    if ncores == 'max':
        ncores = os.cpu_count()
    else:
        ncores = min(os.cpu_count(), int(ncores))

    section = config['reduce.bias']
    bias_file = section['bias_file']

    bias_data_lst = []
    bias_card_lst = []

    bias_items = list(filter(lambda item: item['object'].lower()=='bias',
                             logtable))
    # get the number of bias images
    n_bias = len(bias_items)

    if n_bias == 0:
        # there are no bias frames
        return None, []

    fmt_str = '  - {:>7s} {:^11} {:^8s} {:^7} {:^19s}'
    head_str = fmt_str.format('frameid', 'FileID', 'Object', 'exptime',
                'obsdate')

    for iframe, logitem in enumerate(bias_items):

        # now filter the bias frames
        fname = '{}.fits'.format(logitem['fileid'])
        filename = os.path.join(rawpath, fname)
        data, head = fits.getdata(filename, header=True)
        mask = get_mask(data, head)
        data, card_lst = correct_overscan(data, head, readout_mode)

        # pack the data and fileid list
        bias_data_lst.append(data)

        # append the file information
        prefix = 'HIERARCH GAMSE BIAS FILE {:03d}'.format(iframe+1)
        card = (prefix+' FILEID', logitem['fileid'])
        bias_card_lst.append(card)

        # append the overscan information of each bias frame to
        # bias_card_lst
        for keyword, value in card_lst:
            mobj = re.match(r'^HIERARCH GAMSE (OVERSCAN[\s\S]*)', keyword)
            if mobj:
                newkey = prefix + ' ' + mobj.group(1)
                bias_card_lst.append((newkey, value))

        # print info
        if iframe == 0:
            print('* Combine Bias Images: "{}"'.format(bias_file))
            print(head_str)
        message = fmt_str.format(
                    '[{:d}]'.format(logitem['frameid']),
                    logitem['fileid'], logitem['object'],
                    logitem['exptime'], logitem['obsdate'],
                    )
        print(message)

    prefix = 'HIERARCH GAMSE BIAS '
    bias_card_lst.append((prefix + 'NFILE', n_bias))

    # combine bias images
    bias_data_lst = np.array(bias_data_lst)

    combine_mode = 'mean'
    cosmic_clip  = section.getfloat('cosmic_clip')
    maxiter      = section.getint('maxiter')
    maskmode     = 'max' if n_bias >= 3 else None

    bias_combine = combine_images(bias_data_lst,
            mode        = combine_mode,
            upper_clip  = cosmic_clip,
            maxiter     = maxiter,
            maskmode    = maskmode,
            ncores      = ncores,
            )

    bias_card_lst.append((prefix+'COMBINE_MODE', combine_mode))
    bias_card_lst.append((prefix+'COSMIC_CLIP',  cosmic_clip))
    bias_card_lst.append((prefix+'MAXITER',      maxiter))
    bias_card_lst.append((prefix+'MASK_MODE',    str(maskmode)))

    # create the hdu list to be saved
    hdu_lst = fits.HDUList()
    # create new FITS Header for bias
    head = fits.Header()
    for card in bias_card_lst:
        head.append(card)
    head['HIERARCH GAMSE FILECONTENT 0'] = 'BIAS COMBINED'
    hdu_lst.append(fits.PrimaryHDU(data=bias_combine, header=head))

    ############## bias smooth ##################
    if section.getboolean('smooth'):
        # bias needs to be smoothed
        smooth_method = section.get('smooth_method')

        ny, nx = bias_combine.shape
        newcard_lst = []
        if smooth_method in ['gauss', 'gaussian']:
            # perform 2D gaussian smoothing
            smooth_sigma = section.getint('smooth_sigma')
            smooth_mode  = section.get('smooth_mode')
            bias_smooth = np.zeros_like(bias_combine, dtype=np.float64)
            bias_smooth[0:ny//2, :] = gaussian_filter(
                                        bias_combine[0:ny//2, :],
                                        sigma = smooth_sigma,
                                        mode  = smooth_mode)
            bias_smooth[ny//2:ny, :] = gaussian_filter(
                                        bias_combine[ny//2:ny, :],
                                        sigma = smooth_sigma,
                                        mode  = smooth_mode)

            # write information to FITS header
            newcard_lst.append((prefix+'SMOOTH CORRECTED',  True))
            newcard_lst.append((prefix+'SMOOTH METHOD', 'GAUSSIAN'))
            newcard_lst.append((prefix+'SMOOTH SIGMA',  smooth_sigma))
            newcard_lst.append((prefix+'SMOOTH MODE',   smooth_mode))
        else:
            # Fail loudly: otherwise bias_smooth would be undefined below.
            raise ValueError('Unknown smooth method: {}'.format(smooth_method))

        # pack the cards to bias_card_lst and also hdu_lst
        for card in newcard_lst:
            hdu_lst[0].header.append(card)
            bias_card_lst.append(card)
        hdu_lst.append(fits.ImageHDU(data=bias_smooth))
        card = ('HIERARCH GAMSE FILECONTENT 1', 'BIAS SMOOTHED')
        hdu_lst[0].header.append(card)

        # bias is the result array to return
        bias = bias_smooth
    else:
        # bias not smoothed
        card = (prefix+'SMOOTH CORRECTED', False)
        bias_card_lst.append(card)
        hdu_lst[0].header.append(card)

        # bias is the result array to return
        bias = bias_combine

    hdu_lst.writeto(bias_file, overwrite=True)

    message = 'Bias image written to "{}"'.format(bias_file)
    logger.info(message)
    print(message)

    return bias, bias_card_lst
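# A hedged sketch of the config sections combine_bias() reads (section and
# key names inferred from the lookups above; all values are illustrative):
import configparser

config = configparser.ConfigParser()
config.read_dict({
    'data': {'rawpath': '/data/raw', 'readout_mode': 'normal'},
    'reduce': {'ncores': 'max'},
    'reduce.bias': {'bias_file': 'bias.fits', 'cosmic_clip': '10',
                    'maxiter': '5', 'smooth': 'no'},
})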
Example #14
def convert(input):

    """Input GEIS files "input" will be read and a HDUList object will
       be returned that matches the waiver-FITS format written out by 'stwfits' in IRAF.

       The user can use the writeto method to write the HDUList object to
       a FITS file.
    """

    global dat
    cardLen = fits.Card.length

    # input file(s) must be of the form *.??h and *.??d
    if input[-1] != 'h' or input[-4] != '.':
        raise ValueError("Illegal input GEIS file name %s" % input)

    data_file = input[:-1]+'d'

    _os = sys.platform
    if _os[:5] in ('linux', 'win32', 'sunos') or _os[:3] == 'osf' or _os[:6] == 'darwin':
        bytes_per_line = cardLen+1
    else:
        raise ValueError("Platform %s is not supported (yet)." % _os)

    end_card = 'END'+' '* (cardLen-3)

    # open input file
    im = open(input)

    # Generate the primary HDU
    cards = []
    while 1:
        line = im.read(bytes_per_line)[:cardLen]
        line = line[:8].upper() + line[8:]
        if line == end_card:
            break
        cards.append(fits.Card.fromstring(line))

    phdr = fits.Header(cards)
    im.close()

    # Determine starting point for adding Group Parameter Block keywords to Primary header
    phdr_indx = phdr.index('PSIZE')

    _naxis0 = phdr.get('NAXIS', 0)
    _naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
    _naxis.insert(0, _naxis0)
    _bitpix = phdr['BITPIX']
    _psize = phdr['PSIZE']
    if phdr['DATATYPE'][:4] == 'REAL':
        _bitpix = -_bitpix
    if _naxis0 > 0:
        size = int(numpy.prod(_naxis[1:]))
        data_size = abs(_bitpix) * size // 8
    else:
        data_size = 0
    group_size = data_size + _psize // 8

    # decode the group parameter definitions,
    # group parameters will become extension table
    groups = phdr['GROUPS']
    gcount = phdr['GCOUNT']
    pcount = phdr['PCOUNT']

    formats = []
    bools = []
    floats = []
    cols = [] # column definitions used for extension table
    cols_dict = {} # provides name access to Column defs
    _range = range(1, pcount+1)
    key = [phdr['PTYPE'+str(j)] for j in _range]
    comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]

    # delete group parameter definition header keywords
    _list = ['PTYPE'+str(j) for j in _range] + \
            ['PDTYPE'+str(j) for j in _range] + \
            ['PSIZE'+str(j) for j in _range] + \
            ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']

    # Construct record array formats for the group parameters
    # as interpreted from the Primary header file
    for i in range(1, pcount+1):
        ptype = key[i-1]
        pdtype = phdr['PDTYPE'+str(i)]
        star = pdtype.find('*')
        _type = pdtype[:star]
        _bytes = pdtype[star+1:]

        # collect boolean keywords since they need special attention later

        if _type == 'LOGICAL':
            bools.append(i)
        if pdtype == 'REAL*4':
            floats.append(i)

        # identify keywords which require conversion to special units
        if ptype in kw_DOUBLE:
            _type = 'DOUBLE'

        fmt = geis_fmt[_type] + _bytes
        formats.append((ptype,fmt))

        # Set up definitions for use in creating the group-parameter block table
        nrpt = ''
        nbits = str(int(_bytes)*8)
        if 'CHAR' in _type:
            nrpt = _bytes
            nbits = _bytes

        afmt = cols_fmt[_type]+ nbits
        if 'LOGICAL' in _type:
            afmt = cols_fmt[_type]
        cfmt = cols_pfmt[_type]+nrpt

        #print 'Column format for ',ptype,': ',cfmt,' with dtype of ',afmt
        cols_dict[ptype] = fits.Column(name=ptype,format=cfmt,array=numpy.zeros(gcount,dtype=afmt))
        cols.append(cols_dict[ptype]) # This keeps the columns in order

    _shape = _naxis[1:]
    _shape.reverse()
    # Map BITPIX to a numpy dtype name (ImageHDU.NumCode was removed from astropy)
    _bitpix2dtype = {8: 'uint8', 16: 'int16', 32: 'int32', 64: 'int64',
                     -32: 'float32', -64: 'float64'}
    _code = _bitpix2dtype[_bitpix]
    _bscale = phdr.get('BSCALE', 1)
    _bzero = phdr.get('BZERO', 0)

    if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
        _uint16 = 1
        _bzero = 32768
    else:
        _uint16 = 0

    # delete from the end, so it will not conflict with previous delete
    for i in range(len(phdr)-1, -1, -1):
        if phdr.cards[i].keyword in _list:
            del phdr[i]

    # clean up other primary header keywords
    phdr['SIMPLE'] = True
    phdr['GROUPS'] = False
    _after = 'NAXIS'
    if _naxis0 > 0:
        _after += str(_naxis0)
    phdr.set('EXTEND', value=True, comment="FITS dataset may contain extensions", after=_after)

    # Use copy-on-write for all data types since byteswap may be needed
    # in some platforms.
    f1 = open(data_file, mode='rb')
    dat = f1.read()
    errormsg = ""

    # Define data array for all groups
    arr_shape = _naxis[:]
    arr_shape[0] = gcount
    arr_stack = numpy.zeros(arr_shape,dtype=_code)

    loc = 0
    for k in range(gcount):
        # frombuffer replaces the removed fromstring; copy so the array is writable
        ext_dat = numpy.frombuffer(dat[loc:loc+data_size], dtype=_code).copy()
        ext_dat = ext_dat.reshape(_shape)
        if _uint16:
            ext_dat += _bzero
        # Check to see whether there are any NaN's or infs which might indicate
        # a byte-swapping problem, such as being written out on little-endian
        #   and being read in on big-endian or vice-versa.
        if _code.find('float') >= 0 and \
            (numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))):
            errormsg += "===================================\n"
            errormsg += "= WARNING:                        =\n"
            errormsg += "=  Input image:                   =\n"
            errormsg += input+"[%d]\n"%(k+1)
            errormsg += "=  had floating point data values =\n"
            errormsg += "=  of NaN and/or Inf.             =\n"
            errormsg += "===================================\n"
        elif _code.find('int') >= 0:
            # Check INT data for max values
            ext_dat_frac,ext_dat_exp = numpy.frexp(ext_dat)
            if ext_dat_exp.max() == int(_bitpix) - 1:
                # Potential problems with byteswapping
                errormsg += "===================================\n"
                errormsg += "= WARNING:                        =\n"
                errormsg += "=  Input image:                   =\n"
                errormsg += input+"[%d]\n"%(k+1)
                errormsg += "=  had integer data values        =\n"
                errormsg += "=  with maximum bitvalues.        =\n"
                errormsg += "===================================\n"

        arr_stack[k] = ext_dat
        #ext_hdu = fits.hdu.ImageHDU(data=ext_dat)

        rec = numpy.frombuffer(dat[loc+data_size:loc+group_size], dtype=formats)

        loc += group_size

        # Add data from this GPB to table
        for i in range(1, pcount+1):
            val = rec[0][i-1]
            if i in bools:
                if val:
                    val = 'T'
                else:
                    val = 'F'
            cols[i-1].array[k] = val

        # Based on the first group, add GPB keywords to PRIMARY header
        if k == 0:
            # Create separate PyFITS Card objects for each entry in 'rec'
            # and update Primary HDU with these keywords after PSIZE
            for i in range(1, pcount+1):
                #val = rec.field(i-1)[0]
                val = rec[0][i-1]
                if val.dtype.kind == 'S':
                    val = val.decode('ascii')

                if i in bools:
                    val = bool(val)

                if i in floats:
                    # use fromstring; the format argument of Card is deprecated
                    _str = '%-8s= %20.13G / %s' % (key[i-1], val, comm[i-1])
                    _card = fits.Card.fromstring(_str)
                else:
                    _card = fits.Card(keyword=key[i-1], value=val,
                                      comment=comm[i-1])

                phdr.insert(phdr_indx+i, _card)

            # deal with bscale/bzero
            if (_bscale != 1 or _bzero != 0):
                phdr['BSCALE'] = _bscale
                phdr['BZERO'] = _bzero

        #hdulist.append(ext_hdu)
    # Define new table based on Column definitions
    ext_table = fits.TableHDU.from_columns(cols)  # fits.new_table was removed
    ext_table.header.set('EXTNAME', value=input+'.tab', after='TFIELDS')
    # Add column descriptions to header of table extension to match stwfits output
    for i in range(len(key)):
        ext_table.header.append(fits.Card(keyword=key[i], value=comm[i]))

    if errormsg != "":
        errormsg += "===================================\n"
        errormsg += "=  This file may have been        =\n"
        errormsg += "=  written out on a platform      =\n"
        errormsg += "=  with a different byte-order.   =\n"
        errormsg += "=                                 =\n"
        errormsg += "=  Please verify that the values  =\n"
        errormsg += "=  are correct or apply the       =\n"
        errormsg += "=  '.byteswap()' method.          =\n"
        errormsg += "===================================\n"
        print(errormsg)

    f1.close()

    hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=arr_stack)])
    hdulist.append(ext_table)

    stsci2(hdulist,input)
    return hdulist
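# A call sketch (hypothetical GEIS file names; 'test.c0h' must sit next to its
# 'test.c0d' data file). Per the docstring, the result can be written out:
hdulist = convert('test.c0h')
hdulist.writeto('test_c0h.fits', overwrite=True)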
Example #15
def create_catalogs_out(fileList, z, snap_name):
    """
	Adds Xray emission mass using the Bongiorno et al. 2016 model to the rockstar outputs. 
	"""
    def f_lambda_sar(DATA):
        logM, log_lambda_SAR = DATA
        log_lambda_SAR_var = 10**(log_lambda_SAR - 33.8 + 0.48 * (logM - 11.))
        return 1. / (log_lambda_SAR_var**(1.01 - 0.58 * (z - 1.1)) +
                     log_lambda_SAR_var**(3.72))

    dl = 0.01
    log_lambda_SAR_values = n.arange(32 - dl, 36 + 2 * dl, dl)

    # loops over files
    for fileName in fileList:
        t0 = time.time()
        outFile = fileName[:-5] + "_LSAR.fits"
        # opens all relevant files
        msFile = fileName[:-5] + "_Ms.fits"
        hd = fits.open(fileName)
        hm = fits.open(msFile)

        logM = hm[1].data['stellar_mass_Mo13_mvir']
        agn_random_number = n.random.random(len(logM))
        log_lSAR = n.zeros(len(logM))

        t0 = time.time()
        ii0 = 0
        ii_step = 10000
        for ii0 in n.arange(0, len(logM), ii_step):
            ii1 = ii0 + ii_step
            X, Y = n.meshgrid(logM[ii0:ii1], log_lambda_SAR_values)
            #Z = n.ones_like(X)*z
            probas_un = f_lambda_sar([X, Y])  #, Z ])
            norm = n.sum(probas_un, axis=0)
            probas = probas_un / norm
            cmat = n.array([
                agn_random_number[ii0:ii1] > n.sum(probas.T[:, jj:], axis=1)
                for jj in n.arange(len(log_lambda_SAR_values))
            ])
            #print(cmat.shape, cmat[0])
            #print(cmat.T[1])
            values = log_lambda_SAR_values[n.array([
                n.min(n.where(cmat.T[jj] == True))
                for jj in n.arange(len(cmat.T))
            ])]
            #print(values.shape, values[:10])
            log_lSAR[ii0:ii1] = values
            print(ii0, len(logM), time.time() - t0)

        # columns related to Xray AGN
        col1 = fits.Column(name='lambda_sar_Bo16', format='D', array=log_lSAR)
        col1b = fits.Column(name='agn_random_number',
                            format='D',
                            array=agn_random_number)

        #define the table hdu
        colArray = [col1]
        colArray.append(col1b)
        #for col in hd[1].columns :
        #colArray.append(col)

        hdu_cols = fits.ColDefs(colArray)
        tb_hdu = fits.BinTableHDU.from_columns(hdu_cols)
        #define the header
        prihdr = fits.Header()
        prihdr['author'] = 'JC'
        prihdu = fits.PrimaryHDU(header=prihdr)
        #writes the file
        thdulist = fits.HDUList([prihdu, tb_hdu])
        if os.path.isfile(outFile):
            os.remove(outFile)

        thdulist.writeto(outFile)
        print(time.time() - t0)
Example #16
def api_search(request, query):
    import numpy as np
    from astropy.io import fits
    from astropy.io.fits import Column

    result = unsqurl(query)
    if result['returncode'] != '200':
        # return error
        return HttpResponse(result['error'])

    print('UNSqurl:', result)

    # run the query
    cursor = connections['cosmo'].cursor()
    cursor.execute(result['sql'])
    rows = cursor.fetchall()
    # return FITS file
    priheader = fits.Header()
    priheader['COMMENT'] = "This file was generated by the Cosmo web portal."
    prihdu = fits.PrimaryHDU(header=priheader)
    # this works if the fields all map directly with no arrays
    # for i in range (0, nrows):
    #     for j in range (0, len(dtypes)):
    #         data[i][dtypes[j][0]] = rows[i][j]
    nrows = len(rows)
    if result['table'] == "DEFAULT":
        # try building from columns
        data = {
            'cand_id': [],
            'brickid': [],
            'objid': [],
            'type': [],
            'ra': [],
            'ra_ivar': [],
            'dec': [],
            'dec_ivar': [],
            'bx': [],
            'by': [],
            'bx0': [],
            'by0': [],
            'ebv': [],
            'dchisq': [],
            'fracDev': [],
            'fracDev_ivar': [],
            'shapeExp_r': [],
            'shapeExp_r_ivar': [],
            'shapeExp_e1': [],
            'shapeExp_e1_ivar': [],
            'shapeExp_e2': [],
            'shapeExp_e2_ivar': [],
            'shapeDev_r': [],
            'shapeDev_r_ivar': [],
            'shapeDev_e1': [],
            'shapeDev_e1_ivar': [],
            'shapeDev_e2': [],
            'shapeDev_e2_ivar': [],
            'decam_flux': [],
            'decam_flux_ivar': [],
            'decam_fracflux': [],
            'decam_fracmasked': [],
            'decam_fracin': [],
            'decam_rchi2': [],
            'decam_nobs': [],
            'decam_anymask': [],
            'decam_allmask': [],
            'decam_mw_transmission': [],
            'wise_flux': [],
            'wise_flux_ivar': [],
            'wise_fracflux': [],
            'wise_rchi2': [],
            'wise_nobs': [],
            'wise_mw_transmission': [],
            'decam_apflux': [],
            'decam_apflux_resid': [],
            'decam_apflux_ivar': [],
        }
        for i in range(0, nrows):
            data['cand_id'].append(rows[i][0])
            data['brickid'].append(rows[i][3])
            data['objid'].append(rows[i][4])
            data['type'].append(rows[i][5])
            data['ra'].append(rows[i][6])
            data['ra_ivar'].append(rows[i][7])
            data['dec'].append(rows[i][8])
            data['dec_ivar'].append(rows[i][9])
            data['bx'].append(rows[i][10])
            data['by'].append(rows[i][11])
            data['bx0'].append(rows[i][12])
            data['by0'].append(rows[i][13])
            data['ebv'].append(rows[i][14])
            data['dchisq'].append(
                [rows[i][15], rows[i][16], rows[i][17], rows[i][18]])
            data['fracDev'].append(rows[i][19])
            data['fracDev_ivar'].append(rows[i][20])
            data['shapeExp_r'].append(rows[i][21])
            data['shapeExp_r_ivar'].append(rows[i][22])
            data['shapeExp_e1'].append(rows[i][23])
            data['shapeExp_e1_ivar'].append(rows[i][24])
            data['shapeExp_e2'].append(rows[i][25])
            data['shapeExp_e2_ivar'].append(rows[i][26])
            data['shapeDev_r'].append(rows[i][27])
            data['shapeDev_r_ivar'].append(rows[i][28])
            data['shapeDev_e1'].append(rows[i][29])
            data['shapeDev_e1_ivar'].append(rows[i][30])
            data['shapeDev_e2'].append(rows[i][31])
            data['shapeDev_e2_ivar'].append(rows[i][32])
            data['decam_flux'].append([
                rows[i][33], rows[i][34], rows[i][35], rows[i][36],
                rows[i][37], rows[i][38]
            ])
            data['decam_flux_ivar'].append([
                rows[i][39], rows[i][40], rows[i][41], rows[i][42],
                rows[i][43], rows[i][44]
            ])
            data['decam_fracflux'].append([
                rows[i][45], rows[i][46], rows[i][47], rows[i][48],
                rows[i][49], rows[i][50]
            ])
            data['decam_fracmasked'].append([
                rows[i][51], rows[i][52], rows[i][53], rows[i][54],
                rows[i][55], rows[i][56]
            ])
            data['decam_fracin'].append([
                rows[i][57], rows[i][58], rows[i][59], rows[i][60],
                rows[i][61], rows[i][62]
            ])
            data['decam_rchi2'].append([
                rows[i][63], rows[i][64], rows[i][65], rows[i][66],
                rows[i][67], rows[i][68]
            ])
            data['decam_nobs'].append([
                rows[i][69], rows[i][70], rows[i][71], rows[i][72],
                rows[i][73], rows[i][74]
            ])
            data['decam_anymask'].append([
                rows[i][75], rows[i][76], rows[i][77], rows[i][78],
                rows[i][79], rows[i][80]
            ])
            data['decam_allmask'].append([
                rows[i][81], rows[i][82], rows[i][83], rows[i][84],
                rows[i][85], rows[i][86]
            ])
            data['decam_mw_transmission'].append([
                rows[i][87], rows[i][88], rows[i][89], rows[i][90],
                rows[i][91], rows[i][92]
            ])
            data['wise_flux'].append(
                [rows[i][93], rows[i][99], rows[i][105], rows[i][111]])
            data['wise_flux_ivar'].append(
                [rows[i][94], rows[i][100], rows[i][106], rows[i][112]])
            data['wise_fracflux'].append(
                [rows[i][95], rows[i][101], rows[i][107], rows[i][113]])
            data['wise_rchi2'].append(
                [rows[i][96], rows[i][102], rows[i][108], rows[i][114]])
            data['wise_nobs'].append(
                [rows[i][97], rows[i][103], rows[i][109], rows[i][115]])
            data['wise_mw_transmission'].append(
                [rows[i][98], rows[i][104], rows[i][110], rows[i][116]])
            data['decam_apflux'].append([
                rows[i][118], rows[i][119], rows[i][120], rows[i][121],
                rows[i][122], rows[i][123], rows[i][124], rows[i][125],
                rows[i][142], rows[i][143], rows[i][144], rows[i][145],
                rows[i][146], rows[i][147], rows[i][148], rows[i][149],
                rows[i][166], rows[i][167], rows[i][168], rows[i][169],
                rows[i][170], rows[i][171], rows[i][172], rows[i][173],
                rows[i][190], rows[i][191], rows[i][192], rows[i][193],
                rows[i][194], rows[i][195], rows[i][196], rows[i][197],
                rows[i][214], rows[i][215], rows[i][216], rows[i][217],
                rows[i][218], rows[i][219], rows[i][220], rows[i][221],
                rows[i][238], rows[i][239], rows[i][240], rows[i][241],
                rows[i][242], rows[i][243], rows[i][244], rows[i][245]
            ])
            data['decam_apflux_resid'].append([
                rows[i][126], rows[i][127], rows[i][128], rows[i][129],
                rows[i][130], rows[i][131], rows[i][132], rows[i][133],
                rows[i][150], rows[i][151], rows[i][152], rows[i][153],
                rows[i][154], rows[i][155], rows[i][156], rows[i][157],
                rows[i][174], rows[i][175], rows[i][176], rows[i][177],
                rows[i][178], rows[i][179], rows[i][180], rows[i][181],
                rows[i][198], rows[i][199], rows[i][200], rows[i][201],
                rows[i][202], rows[i][203], rows[i][204], rows[i][205],
                rows[i][222], rows[i][223], rows[i][224], rows[i][225],
                rows[i][226], rows[i][227], rows[i][228], rows[i][229],
                rows[i][246], rows[i][247], rows[i][248], rows[i][249],
                rows[i][250], rows[i][251], rows[i][252], rows[i][253]
            ])
            data['decam_apflux_ivar'].append([
                rows[i][134], rows[i][135], rows[i][136], rows[i][137],
                rows[i][138], rows[i][139], rows[i][140], rows[i][141],
                rows[i][158], rows[i][159], rows[i][160], rows[i][161],
                rows[i][162], rows[i][163], rows[i][164], rows[i][165],
                rows[i][182], rows[i][183], rows[i][184], rows[i][185],
                rows[i][186], rows[i][187], rows[i][188], rows[i][189],
                rows[i][206], rows[i][207], rows[i][208], rows[i][209],
                rows[i][210], rows[i][211], rows[i][212], rows[i][213],
                rows[i][230], rows[i][231], rows[i][232], rows[i][233],
                rows[i][234], rows[i][235], rows[i][236], rows[i][237],
                rows[i][254], rows[i][255], rows[i][256], rows[i][257],
                rows[i][258], rows[i][259], rows[i][260], rows[i][261]
            ])

        c0 = Column(name='cand_id', format='J', array=data['cand_id'])
        c1 = Column(name='brickid', format='J', array=data['brickid'])
        c2 = Column(name='objid', format='J', array=data['objid'])
        c3 = Column(name='type', format='10A', array=data['type'])
        c4 = Column(name='ra', format='D', array=data['ra'])
        c5 = Column(name='ra_ivar', format='E', array=data['ra_ivar'])
        c6 = Column(name='dec', format='D', array=data['dec'])
        c7 = Column(name='dec_ivar', format='E', array=data['dec_ivar'])
        c8 = Column(name='bx', format='D', array=data['bx'])
        c9 = Column(name='by', format='D', array=data['by'])
        c10 = Column(name='bx0', format='E', array=data['bx0'])
        c11 = Column(name='by0', format='E', array=data['by0'])
        c12 = Column(name='ebv', format='E', array=data['ebv'])
        c13 = Column(name='dchisq', format='4D', array=data['dchisq'])
        c14 = Column(name='fracDev', format='E', array=data['fracDev'])
        c15 = Column(name='fracDev_ivar',
                     format='E',
                     array=data['fracDev_ivar'])
        c16 = Column(name='shapeExp_r', format='E', array=data['shapeExp_r'])
        c17 = Column(name='shapeExp_r_ivar',
                     format='E',
                     array=data['shapeExp_r_ivar'])
        c18 = Column(name='shapeExp_e1', format='E', array=data['shapeExp_e1'])
        c19 = Column(name='shapeExp_e1_ivar',
                     format='E',
                     array=data['shapeExp_e1_ivar'])
        c20 = Column(name='shapeExp_e2', format='E', array=data['shapeExp_e2'])
        c21 = Column(name='shapeExp_e2_ivar',
                     format='E',
                     array=data['shapeExp_e2_ivar'])
        c22 = Column(name='shapeDev_r', format='E', array=data['shapeDev_r'])
        c23 = Column(name='shapeDev_r_ivar',
                     format='E',
                     array=data['shapeDev_r_ivar'])
        c24 = Column(name='shapeDev_e1', format='E', array=data['shapeDev_e1'])
        c25 = Column(name='shapeDev_e1_ivar',
                     format='E',
                     array=data['shapeDev_e1_ivar'])
        c26 = Column(name='shapeDev_e2', format='E', array=data['shapeDev_e2'])
        c27 = Column(name='shapeDev_e2_ivar',
                     format='E',
                     array=data['shapeDev_e2_ivar'])
        c28 = Column(name='decam_flux', format='6E', array=data['decam_flux'])
        c29 = Column(name='decam_flux_ivar',
                     format='6E',
                     array=data['decam_flux_ivar'])
        c30 = Column(name='decam_fracflux',
                     format='6E',
                     array=data['decam_fracflux'])
        c31 = Column(name='decam_fracmasked',
                     format='6E',
                     array=data['decam_fracmasked'])
        c32 = Column(name='decam_fracin',
                     format='6E',
                     array=data['decam_fracin'])
        c33 = Column(name='decam_rchi2',
                     format='6E',
                     array=data['decam_rchi2'])
        c34 = Column(name='decam_nobs', format='6I', array=data['decam_nobs'])
        c35 = Column(name='decam_anymask',
                     format='6I',
                     array=data['decam_anymask'])
        c36 = Column(name='decam_allmask',
                     format='6I',
                     array=data['decam_allmask'])
        c37 = Column(name='decam_mw_transmission',
                     format='6E',
                     array=data['decam_mw_transmission'])
        c38 = Column(name='wise_flux', format='4E', array=data['wise_flux'])
        c39 = Column(name='wise_flux_ivar',
                     format='4E',
                     array=data['wise_flux_ivar'])
        c40 = Column(name='wise_fracflux',
                     format='4E',
                     array=data['wise_fracflux'])
        c41 = Column(name='wise_rchi2', format='4E', array=data['wise_rchi2'])
        c42 = Column(name='wise_nobs', format='4I', array=data['wise_nobs'])
        c43 = Column(name='wise_mw_transmission',
                     format='4E',
                     array=data['wise_mw_transmission'])
        c44 = Column(name='decam_apflux',
                     format='48E',
                     array=data['decam_apflux'])
        c45 = Column(name='decam_apflux_resid',
                     format='48E',
                     array=data['decam_apflux_resid'])
        c46 = Column(name='decam_apflux_ivar',
                     format='48E',
                     array=data['decam_apflux_ivar'])
        hdu = fits.BinTableHDU.from_columns([
            c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14,
            c15, c16, c17, c18, c19, c20, c21, c22, c23, c24, c25, c26, c27,
            c28, c29, c30, c31, c32, c33, c34, c35, c36, c37, c38, c39, c40,
            c41, c42, c43, c44, c45, c46
        ])

    elif result['table'] == 'CANDIDATE':
        dtypes = [('brickid', 'i4'), ('objid', 'i4'), ('blob', 'i8'),
                  ('type', 'S10'), ('ra', 'float64'), ('ra_ivar', 'float64'),
                  ('dec', 'float64'), ('dec_ivar', 'float64'),
                  ('bx', 'float64'), ('by', 'float64'), ('bx0', 'float64'),
                  ('by0', 'float64'), ('ebv', 'float64'),
                  ('dchisq1', 'float64'), ('dchisq2', 'float64'),
                  ('dchisq3', 'float64'), ('dchisq4', 'float64'),
                  ('fracdev', 'float64'), ('fracdev_ivar', 'float64'),
                  ('shapeexp_r', 'float64'), ('shapeexp_r_ivar', 'float64'),
                  ('shapeexp_e1', 'float64'), ('shapeexp_e1_ivar', 'float64'),
                  ('shapeexp_e2', 'float64'), ('shapeexp_e2_ivar', 'float64'),
                  ('shapedev_r', 'float64'), ('shapedev_r_ivar', 'float64'),
                  ('shapedev_e1', 'float64'), ('shapedev_e1_ivar', 'float64'),
                  ('shapedev_e2', 'float64'), ('shapedev_e2_ivar', 'float64')]
        data = np.zeros(nrows, dtype=dtypes)
        for i in range(0, nrows):
            data[i]['brickid'] = rows[i][0]
            data[i]['objid'] = rows[i][1]
            data[i]['blob'] = rows[i][2]
            data[i]['type'] = str(rows[i][3])
            data[i]['ra'] = rows[i][4]
            data[i]['ra_ivar'] = rows[i][5]
            data[i]['dec'] = rows[i][6]
            data[i]['dec_ivar'] = rows[i][7]
            data[i]['bx'] = rows[i][8]
            data[i]['by'] = rows[i][9]
            data[i]['bx0'] = rows[i][10]
            data[i]['by0'] = rows[i][11]
            data[i]['ebv'] = rows[i][12]
            data[i]['dchisq1'] = rows[i][13]
            data[i]['dchisq2'] = rows[i][14]
            data[i]['dchisq3'] = rows[i][15]
            data[i]['dchisq4'] = rows[i][16]
            data[i]['fracdev'] = rows[i][17]
            data[i]['fracdev_ivar'] = rows[i][18]
            data[i]['shapeexp_r'] = rows[i][19]
            data[i]['shapeexp_r_ivar'] = rows[i][20]
            data[i]['shapeexp_e1'] = rows[i][21]
            data[i]['shapeexp_e1_ivar'] = rows[i][22]
            data[i]['shapeexp_e2'] = rows[i][23]
            data[i]['shapeexp_e2_ivar'] = rows[i][24]
            data[i]['shapedev_r'] = rows[i][25]
            data[i]['shapedev_r_ivar'] = rows[i][26]
            data[i]['shapedev_e1'] = rows[i][27]
            data[i]['shapedev_e1_ivar'] = rows[i][28]
            data[i]['shapedev_e2'] = rows[i][29]
            data[i]['shapedev_e2_ivar'] = rows[i][30]
        hdu = fits.BinTableHDU(data, header=priheader)
    elif result['table'] == 'DECAM':
        # try building from columns
        data = {
            'cand_id': [],
            'decam_flux': [],
            'decam_flux_ivar': [],
            'decam_fracflux': [],
            'decam_fracmasked': [],
            'decam_fracin': [],
            'decam_rchi2': [],
            'decam_nobs': [],
            'decam_anymask': [],
            'decam_allmask': [],
            'decam_ext': []
        }
        for i in range(0, nrows):
            data['cand_id'].append(rows[i][0])
            data['decam_flux'].append([
                rows[i][1], rows[i][11], rows[i][21], rows[i][31], rows[i][41],
                rows[i][51]
            ])
            data['decam_flux_ivar'].append([
                rows[i][2], rows[i][12], rows[i][22], rows[i][32], rows[i][42],
                rows[i][52]
            ])
            data['decam_fracflux'].append([
                rows[i][3], rows[i][13], rows[i][23], rows[i][33], rows[i][43],
                rows[i][53]
            ])
            data['decam_fracmasked'].append([
                rows[i][4], rows[i][14], rows[i][24], rows[i][34], rows[i][44],
                rows[i][54]
            ])
            data['decam_fracin'].append([
                rows[i][5], rows[i][15], rows[i][25], rows[i][35], rows[i][45],
                rows[i][55]
            ])
            data['decam_rchi2'].append([
                rows[i][6], rows[i][16], rows[i][26], rows[i][36], rows[i][46],
                rows[i][56]
            ])
            data['decam_nobs'].append([
                rows[i][7], rows[i][17], rows[i][27], rows[i][37], rows[i][47],
                rows[i][57]
            ])
            data['decam_anymask'].append([
                rows[i][8], rows[i][18], rows[i][28], rows[i][38], rows[i][48],
                rows[i][58]
            ])
            data['decam_allmask'].append([
                rows[i][9], rows[i][19], rows[i][29], rows[i][39], rows[i][49],
                rows[i][59]
            ])
            data['decam_ext'].append([
                rows[i][10], rows[i][20], rows[i][30], rows[i][40],
                rows[i][50], rows[i][60]
            ])

        c1 = Column(name='cand_id', format='J', array=data['cand_id'])
        c2 = Column(name='decam_flux', format='6D', array=data['decam_flux'])
        c3 = Column(name='decam_flux_ivar',
                    format='6D',
                    array=data['decam_flux_ivar'])
        c4 = Column(name='decam_fracflux',
                    format='6D',
                    array=data['decam_fracflux'])
        c5 = Column(name='decam_fracmasked',
                    format='6D',
                    array=data['decam_fracmasked'])
        c6 = Column(name='decam_fracin',
                    format='6D',
                    array=data['decam_fracin'])
        c7 = Column(name='decam_rchi2', format='6D', array=data['decam_rchi2'])
        c8 = Column(name='decam_nobs', format='6D', array=data['decam_nobs'])
        c9 = Column(name='decam_anymask',
                    format='6D',
                    array=data['decam_anymask'])
        c10 = Column(name='decam_allmask',
                     format='6D',
                     array=data['decam_allmask'])
        c11 = Column(name='decam_ext', format='6D', array=data['decam_ext'])
        hdu = fits.BinTableHDU.from_columns(
            [c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11])
        # dtypes = [('cand_id', 'i4'),('decam_flux',np.float64(6,)),
        #     ('decam_flux_ivar',np.float64(6,)),('decam_fracflux',np.float64(6,)),('decam_fracmasked',np.float64(6,)),('decam_fracin',np.float64(6,)),
        #     ('decam_rchi2',np.float64(6,)),('decam_nobs',np.float64(6,)),('decam_anymask',np.float64(6,)),('decam_allmask',np.float64(6,)),('decam_ext',np.float64(6,))]
        # data = np.zeros((nrows,6), dtype=dtypes)

    elif result['table'] == 'WISE':
        dtypes = [()]
        #line_cand = [ tbdata['brickid'][i], tbdata['objid'][i], tbdata['blob'][i], tbdata['type'][i], tbdata['ra'][i], tbdata['ra_ivar'][i], tbdata['dec'][i], tbdata['dec_ivar'][i], tbdata['bx'][i], tbdata['by'][i], tbdata['bx0'][i], tbdata['by0'][i], bool(lb), bool(oob), tbdata['ebv'][i], tbdata['dchisq'][i][0], tbdata['dchisq'][i][1], tbdata['dchisq'][i][2], tbdata['dchisq'][i][3], tbdata['fracDev'][i], tbdata['fracDev_ivar'][i], tbdata['shapeExp_r'][i], tbdata['shapeExp_r_ivar'][i], tbdata['shapeExp_e1'][i], tbdata['shapeExp_e1_ivar'][i], tbdata['shapeExp_e2'][i], tbdata['shapeExp_e2_ivar'][i], tbdata['shapeDev_r'][i], tbdata['shapeDev_r_ivar'][i], tbdata['shapeDev_e1'][i], tbdata['shapeDev_e1_ivar'][i], tbdata['shapeDev_e2'][i], tbdata['shapeDev_e2_ivar'][i] ]

        # for later
        # ('decam_flux','f8',(3,4)),('decam_flux_ivar','f8',(3,4)),('decam_apflux','f8',(3,4)),
        # ('decam_apflux_resid','f8',(3,4)),('decam_apflux_ivar','f8',(3,4)),('decam_mw_transmission','f8',(3,4)),
        # ('decam_nobs','f8',(3,4)),('decam_rchi2','f8',(3,4)),('decam_fracflux','f8',(3,4)),('decam_fracmasked','f8',(3,4)),
        # ('decam_fracin','f8',(3,4)),('decam_saturated','f8',(3,4)),('out_of_bounds','f8',(3,4)),('decam_anymask','f8',(3,4)),
        # ('decam_allmask','f8',(3,4))

    outfile = 'data.fits'

    fits.writeto(outfile, hdu.data, hdu.header, overwrite=True)
    fsock = open(outfile, "rb")
    response = StreamingHttpResponse(fsock, content_type='application/fits')
    response['Content-Disposition'] = 'attachment; filename="' + outfile + '"'
    return response
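
The view above round-trips through 'data.fits' on disk. A hedged alternative, serializing the HDUList into an in-memory buffer instead (same Django context assumed; names illustrative):

import io
from astropy.io import fits
from django.http import HttpResponse

def fits_response(hdu, filename='data.fits'):
    # serialize primary HDU + table into a BytesIO buffer instead of a temp file
    buf = io.BytesIO()
    fits.HDUList([fits.PrimaryHDU(), hdu]).writeto(buf)
    response = HttpResponse(buf.getvalue(), content_type='application/fits')
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    return response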
Exemplo n.º 17
                                            return_S2=True,
                                            ncores=ncores)

table = [
    fits.Column(name='rmpc', format='E', array=r),
    fits.Column(name='xmpc', format='E', array=x),
    fits.Column(name='ympc', format='E', array=y),
    fits.Column(name='S0', format='E', array=S0),
    fits.Column(name='DS0', format='E', array=DS0),
    fits.Column(name='S', format='E', array=S),
    fits.Column(name='DS', format='E', array=DS),
    fits.Column(name='Gt', format='E', array=gt),
    fits.Column(name='S2', format='E', array=s2),
    fits.Column(name='Gx', format='E', array=gx)
]

tbhdu = fits.BinTableHDU.from_columns(fits.ColDefs(table))

h = fits.Header()
h.append(('lM200', np.round(lM200_miss, 4)))
h.append(('c200', np.round(c200_miss, 4)))
h.append(('soff', np.round(soff, 4)))
h.append(('q', np.round(q, 4)))

primary_hdu = fits.PrimaryHDU(header=h)

hdul = fits.HDUList([primary_hdu, tbhdu])

hdul.writeto(folder + 'mapas/mapa_bin_' + sampname + '_miss.fits',
             overwrite=True)
Exemplo n.º 18
    def createLCproperties(self, z, appmag, sig, fname, lag_frac, fdir=None):
        '''
        Generates a few parameters necessary for AGN simulation.
        Parameters are saved to the zeroth extension header of the .fits-file
        saved with name and directory specified by input parameters.
        If an identical file already exists, the file will be overwritten.
        ---
        INPUT:
        z: float
          Redshift of the AGN we wish to create.
        appmag: float
          Apparent magnitude (in r-band) of the AGN we wish to create.
        sig: float
          Standard deviation of the continuum data; SFinfty is set to 2*sig.
        fname: string
          Name of .fits-file containing the light-curve representing the
          AGN we are about to create. Do not include ".fits".
        lag_frac: float
          Multiplicative factor applied to the Hbeta, MgII and CIV lags.
        fdir: string, optional
          Directory where we wish to save the .fits-file containing the
          light-curves representing the AGN we are about to create.
        ---
        '''

        # Define AGN parameters directly dependent on z and rmag
        waveZ, lum, Lbol = self.getLbol(z, appmag, self.gFilter)
        absMag = self.getAbsMag(z, appmag, Lbol, self.iFilter)
        lL5100, lL3000, lL1350, Hbeta, MgII, CIV = self.getLag(z, waveZ, lum)

        Hbeta *= lag_frac
        MgII *= lag_frac
        CIV *= lag_frac
        
        
        if z <= 0.62: # H beta
            logmBH = np.random.normal(8.23,0.37)-9.
        elif z <= 1.8 and z >0.62: # MgII
            logmBH = np.random.normal(9.65,0.39)-9.
        else: # CIV
            logmBH = np.random.normal(9.55,0.27)-9.
        '''
        if z <= 0.62: # H beta
            lag = Hbeta/(1+z)
        elif z <= 1.8 and z >0.62: # MgII
            lag = MgII/(1+z)
        else: # CIV
            lag = CIV/(1+z)
        
        
        
        G = 6.67*10**-11
        c = 2.998*10**8
        f = 4.47
        Msun = 1.989*10**30
    
        lag = lag*86400
        velocity = velocity*1000
    
        logmBH = np.log10(f*(pow(velocity, 2)*c*lag/G)/Msun)-9
        '''

        # Define SFinfty and tau parameters based on MacLeod+10
        Asf  = -0.51
        Bsf  = -0.479
        Csf  = 0.131
        Dsf  = 0.18
        Atau = 2.4
        Btau = 0.17
        Ctau = 0.03
        Dtau = 0.21

        # Define wavelengths for specific emission lines
        LamH  = 5100.
        LamMg = 3000.
        LamC  = 1350.

        # Define SFinfty and tau for each emission line
        
        # use 2*std of cont data
        sfH = 2.*sig
        sfMg = 2.*sig
        sfC = 2.*sig 
        #sfH   = 10**( Asf  + Bsf*np.log10(LamH/4000)   + Csf*(absMag+23)  + Dsf*logmBH )
        #sfMg  = 10**( Asf  + Bsf*np.log10(LamMg/4000)  + Csf*(absMag+23)  + Dsf*logmBH )
        #sfC   = 10**( Asf  + Bsf*np.log10(LamC/4000)   + Csf*(absMag+23)  + Dsf*logmBH )
        tauH  = 10**( Atau + Btau*np.log10(LamH/4000)  + Ctau*(absMag+23) + Dtau*logmBH ) * (1.+z)
        tauMg = 10**( Atau + Btau*np.log10(LamMg/4000) + Ctau*(absMag+23) + Dtau*logmBH ) * (1.+z)
        tauC  = 10**( Atau + Btau*np.log10(LamC/4000)  + Ctau*(absMag+23) + Dtau*logmBH ) * (1.+z)

        # Create .fits-header
        hdr = fits.Header()
        hdr['Z'] = z                    # Redshift
        hdr['L'] = Lbol                 # Bolometric luminosity
        hdr['MBH'] = 10**(logmBH+9)     # Black hole mass
        hdr['SFH'] = sfH                # SFinfty for Hbeta
        hdr['SFM'] = sfMg               # SFinfty for MgII
        hdr['SFC'] = sfC                # SFinfty for CIV
        hdr['TAUDH'] = tauH             # tau for Hbeta
        hdr['TAUDM'] = tauMg            # tau for MgII
        hdr['TAUDC'] = tauC             # tau for CIV
        hdr['LAMBDAH'] = LamH           # Wavelength of Hbeta
        hdr['LAMBDAM'] = LamMg          # Wavelength of MgII
        hdr['LAMBDAC'] = LamC           # Wavelength of CIV
        hdr['LAMLUMH'] = lL5100         # Wavelength * spectral luminosity for Hbeta
        hdr['LAMLUMM'] = lL3000         # Wavelength * spectral luminosity for MgII
        hdr['LAMLUMC'] = lL1350         # Wavelength * spectral luminosity for CIV
        hdr['AGNLAGH'] = Hbeta          # AGN Hbeta lag
        hdr['AGNLAGM'] = MgII           # AGN MgII lag
        hdr['AGNLAGC'] = CIV            # AGN CIV lag
        hdr['RAPPMAG'] = appmag         # Apparent magnitude through r-band filter
        hdr['IABSMAG'] = absMag         # Absolute magnitude through i-band filter
        hdu = fits.PrimaryHDU(header=hdr)

        # Check whether directory is specified, name .fits-file accordingly
        if fdir:
            if not os.path.exists(fdir): os.makedirs(fdir)
            ffile = fdir+fname+'_sim.fits'
        else:
            ffile = fname+'_sim.fits'
        # End if-statement

        # Save .fits-file
        if os.path.isfile(ffile): os.remove(ffile)
        hdu.writeto(ffile)
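
A short usage sketch (file name hypothetical): once createLCproperties() has written the file, the simulation parameters live in the zeroth-extension header and can be read back directly.

from astropy.io import fits

hdr = fits.getheader('myagn_sim.fits', 0)   # assumes fname='myagn', fdir=None
print(hdr['Z'], hdr['MBH'], hdr['TAUDH'])   # redshift, BH mass, Hbeta tau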
Exemplo n.º 19
                os.path.join(
                    rootdir,
                    'sector001_camera{camera:d}_ccd{ccd:d}.hdf5'.format(
                        camera=camera, ccd=ccd)), 'r') as hdf:

            N = len(hdf['images'])
            a = np.full(N, np.nan)
            b = np.full(N, np.nan)
            cno = np.arange(0, N, 1)

            for k in trange(N):
                if hdf['quality'][k] == 0:
                    hdr_string = hdf['wcs']['%04d' % k][0]
                    if not isinstance(hdr_string, str):
                        hdr_string = hdr_string.decode("utf-8")  # For Python 3
                    wcs = WCS(header=fits.Header().fromstring(hdr_string),
                              relax=True)

                    xycen = wcs.all_world2pix(np.atleast_2d(camera_centre),
                                              0,
                                              ra_dec_order=True)

                    a[k] = xycen[0][0]
                    b[k] = xycen[0][1]

            am = np.nanmedian(a)
            bm = np.nanmedian(b)

            plt.figure()
            plt.scatter(cno, a)
            plt.axhline(am)
Exemplo n.º 20
def create_catalog(snap_name, z):
    """
	Creates summary catalog for AGN only"""
    #
    fileList_snap = n.array(
        glob.glob(
            os.path.join(os.environ["MD10"], 'work_agn',
                         'out_' + snap_name + '_SAM_Nb_?.fits')))
    fileList_ms = n.array(
        glob.glob(
            os.path.join(os.environ["MD10"], 'work_agn',
                         'out_' + snap_name + '_SAM_Nb_?_Ms.fits')))
    fileList_Xray = n.array(
        glob.glob(
            os.path.join(os.environ["MD10"], 'work_agn',
                         'out_' + snap_name + '_SAM_Nb_?_Xray.fits')))
    fileList_snap.sort()
    fileList_ms.sort()
    fileList_Xray.sort()

    out_snap = os.path.join(os.environ['MD10'], "catalogs",
                            'out_' + snap_name + "_AGN_snapshot.fits")
    out_ms = os.path.join(os.environ['MD10'], "catalogs",
                          'out_' + snap_name + "_AGN_Ms.fits")
    out_xray = os.path.join(os.environ['MD10'], "catalogs",
                            'out_' + snap_name + "_AGN_Xray.fits")

    # loops over files
    dat_snap = []
    dat_ms = []
    dat_xray = []
    LX = []
    index = n.searchsorted(z_vals, z)

    for fileSnap, fileMs, fileXray in zip(fileList_snap, fileList_ms,
                                          fileList_Xray):
        print(fileSnap)
        print(fileMs)
        print(fileXray)
        hd = fits.open(fileSnap)
        hm = fits.open(fileMs)
        hx = fits.open(fileXray)

        MS = hm[1].data['stellar_mass_Mo13_mvir']
        SAR = hx[1].data['lambda_sar_Bo16']
        agn = (hx[1].data['activity']) & (MS > 0)
        lognh = hx[1].data['log_NH_Buchner2017']

        dat_snap.append(hd[1].data[agn])
        dat_ms.append(hm[1].data[agn])
        dat_xray.append(hx[1].data[agn])

        percent_observed = obscuration_interpolation_grid[index](lognh)
        LX.append(n.log10(10**(MS[agn] + SAR[agn]) * percent_observed[agn]))

    hdu_cols = fits.ColDefs(n.hstack(dat_snap))
    print("snap", n.hstack(dat_snap).shape)
    tb_hdu = fits.BinTableHDU.from_columns(hdu_cols)
    #define the header
    prihdr = fits.Header()
    prihdr['author'] = 'JC'
    prihdr['info'] = 'snapshot'
    prihdu = fits.PrimaryHDU(header=prihdr)
    #writes the file
    thdulist = fits.HDUList([prihdu, tb_hdu])
    if os.path.isfile(out_snap):
        os.remove(out_snap)

    thdulist.writeto(out_snap)

    hdu_cols = fits.ColDefs(n.hstack(dat_ms))
    print("ms", n.hstack(dat_ms).shape)
    tb_hdu = fits.BinTableHDU.from_columns(hdu_cols)
    #define the header
    prihdr = fits.Header()
    prihdr['author'] = 'JC'
    prihdr['info'] = 'ms'
    prihdu = fits.PrimaryHDU(header=prihdr)
    #writes the file
    thdulist = fits.HDUList([prihdu, tb_hdu])
    if os.path.isfile(out_ms):
        os.remove(out_ms)

    thdulist.writeto(out_ms)

    hdu_cols = fits.ColDefs(n.hstack(dat_xray))
    hdu_cols.add_col(
        fits.Column(name='LX_05_2_keV', format='D', array=n.hstack(LX)))
    print("xray", n.hstack(dat_xray).shape)
    tb_hdu = fits.BinTableHDU.from_columns(hdu_cols)
    #define the header
    prihdr = fits.Header()
    prihdr['author'] = 'JC'
    prihdr['info'] = 'xray'
    prihdu = fits.PrimaryHDU(header=prihdr)
    #writes the file
    thdulist = fits.HDUList([prihdu, tb_hdu])
    if os.path.isfile(out_xray):
        os.remove(out_xray)

    thdulist.writeto(out_xray)
Exemplo n.º 21
    theta = float(hdulist[0].header['BPA']) # degree, from +Y, https://github.com/astropy/astropy/issues/3550
    print('psf major minor theta %.4f %.4f %.2f'%(stddev_major, stddev_minor, theta))
    
    #theta = 0.0 # theta = 0.0 is +X direction! checked by dzliu 20170305.
    #theta = 90.0/180.0*numpy.pi # theta = 90.0 is +Y direction! checked by dzliu 20170305.
    
    PsfModel = Gaussian2D(1.0000, 0.0, 0.0, stddev_major/pixscale, stddev_minor/pixscale, (theta+90.0)/180.0*numpy.pi) # amplitude, x_mean, y_mean, x_stddev, y_stddev, theta
    PsfNAXIS = numpy.max([stddev_major, stddev_minor]) / pixscale * 15.0
    PsfNAXIS = numpy.round((PsfNAXIS-1.0)/2.0)*2+1 # make it odd number
    PsfNAXIS = numpy.array([PsfNAXIS, PsfNAXIS])
    print('psf model naxis %d %d'%(PsfNAXIS[0], PsfNAXIS[1]))
    PsfModel.bounding_box = ((-(PsfNAXIS[0]-1)/2, (PsfNAXIS[0]-1)/2), (-(PsfNAXIS[1]-1)/2, (PsfNAXIS[1]-1)/2))
    PsfImage = PsfModel.render()
    #print(PsfModel.render())
    
    hdr = fits.Header()
    hdr['BMAJ'] = hdulist[0].header['BMAJ']
    hdr['BMIN'] = hdulist[0].header['BMIN']
    hdr['BPA'] = hdulist[0].header['BPA']
    hdr['PIXSCALE'] = pixscale
    hdu = fits.PrimaryHDU(PsfImage, header=hdr)
    hduli = fits.HDUList([hdu])
    
    # output to outfile
    if os.path.isfile(outfile):
        os.system('mv \"%s\" \"%s\"'%(outfile, outfile.replace('.fits','.backup.fits')))
        print('Backed up existing \"%s\" as \"%s\"!'%(outfile, outfile.replace('.fits','.backup.fits')))
    hduli.writeto(outfile)
    print('Output to \"%s\"!'%(outfile))
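
The snippet above depends on stddev_major/stddev_minor computed earlier (not shown). A plausible reconstruction, assuming BMAJ/BMIN are the beam FWHM in degrees, converting to Gaussian stddevs in arcsec (function name hypothetical):

import numpy
from astropy.io import fits

def beam_stddevs_arcsec(fitsfile):
    # FWHM = 2*sqrt(2*ln 2) * sigma, so sigma ~= 0.4247 * FWHM
    hdr = fits.getheader(fitsfile)
    fwhm_to_sigma = 1.0 / (2.0 * numpy.sqrt(2.0 * numpy.log(2.0)))
    return (hdr['BMAJ'] * 3600.0 * fwhm_to_sigma,
            hdr['BMIN'] * 3600.0 * fwhm_to_sigma)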

Exemplo n.º 22
    def head(self):
        """Returns the first Window or an empty fits.Header if there is
        no first Window. The header of the first Window is where general
        header items of the CCD are stored."""
        return next(iter(self.values()), fits.Header())
Exemplo n.º 23
def cube2spec(cube,x,y,s,write=None,shape='box',helio=0,mask=None,twod=True,tovac=False,idsource=None):

    """ 
    Extract a 1D spectrum from a cube at position x,y in box or circle of radius s 

    If shape = 'mask', then mask is a boolean mask and pixels within it will be extracted from
    the argument mask. Mask is a datacube [e.g. from cubex]

    idsource -> if > 0, then only pixels in mask with that ID will be extracted

    helio passes a heliocentric correction in km/s [should be 0 with pipeline v1.2.1]

    twod -> also reconstruct a 2D spec

    tovac -> if true, return wavelengths in vacuum 

    write -> output file 

    """
    import matplotlib.pyplot as plt
    import numpy as np
    from astropy.io import fits 

    #read the cube
    cubdata,vardata,wcsc,wavec,regi=readcube(cube,helio=helio)
    cubdata=np.nan_to_num(cubdata)

    #if mask extract all True pixels 
    if('mask' in shape):
        if(idsource):
            goodpix=np.nonzero(mask == idsource)
        else:
            goodpix=np.nonzero(mask)
        xpix=goodpix[1]
        ypix=goodpix[2]
    else:
        #If user defined region, grab inner pixels
        #cut region of interest according to shape
        xpix=[]
        ypix=[]
        xside=np.arange(x-s-1,x+s+1,1)
        yside=np.arange(y-s-1,y+s+1,1)
        for xx in xside:
            for yy in yside:
                if('box' in shape):
                    if((abs(xx-x) <= s) & (abs(yy-y) <= s)):
                        xpix.append(xx)
                        ypix.append(yy)
                if('circ' in shape):
                    dist=np.sqrt((xx-x)**2+(yy-y)**2)
                    if(dist <= s):
                        xpix.append(xx)
                        ypix.append(yy)
                        
    #Some checks...
    #cbmed=np.median(cubdata,axis=0)
    #cbmed[xpix,ypix]=100000
    #imgplot=plt.imshow(cbmed,origin='lower')
    #imgplot.set_clim(-5,5)
    #plt.show()

    #now make space for the 1d spectrum 
    spec_flx=np.zeros(len(wavec))
    spec_var=np.zeros(len(wavec))
    spec_med=np.zeros(len(wavec))

    #if want 2d, prepare space for it
    #This simulates a slit in the x direction 
    #adding up all the flux on the y
    if(twod):
        #find unique pixels (not all x,y) - need to sort for later
        uxpix=np.sort(list(set(xpix)))
        uypix=np.sort(list(set(ypix)))
        npix=len(uxpix)
        nwv=len(wavec)
        twodspec=np.zeros((nwv,npix))
        twoderr=np.zeros((nwv,npix))

    #loop over all wavelength to fill in spectrum
    for ii,ww in enumerate(wavec):
        #get the total spec in the aperture, 
        #summing over all the pixels 
        spec_flx[ii]=np.sum(cubdata[ii,xpix,ypix])
        spec_var[ii]=np.sum(vardata[ii,xpix,ypix])
        spec_med[ii]=np.median(cubdata[ii,xpix,ypix])
        
        #fill in 2D spectrum in a box 
        if(twod):
            #sum only on the full x-extent
            for jj in range(npix):
                #add all the pixels in y
                twodspec[ii,jj]=np.sum(cubdata[ii,uxpix[jj],uypix])
                twoderr[ii,jj]=np.sum(vardata[ii,uxpix[jj],uypix])
   
    #extract the 2D image with a small buffer around
    if(twod):
        twodimg=np.median(cubdata[:,uxpix[0]-5:uxpix[-1]+6,uypix[0]-5:uypix[-1]+6],axis=0)
        #from variance to error
        twoderr=np.sqrt(twoderr)

    #mean in aperture
    #totpix=len(xpix)
    #spec_flx=spec_flx/totpix
    #spec_err=np.sqrt(spec_var/totpix)

    #keep total spectrum and not mean
    totpix=len(xpix)
    spec_err=np.sqrt(spec_var)
    
    #if set, convert to vacuum using airtovac.pro conversion
    if(tovac):
        #save current wave
        wavec=np.array(wavec,dtype=np.float64)
        wave_air=wavec
        
        sigma2 = (1e4/wavec)**2.    
        fact = 1.+5.792105e-2/(238.0185-sigma2)+1.67917e-3/(57.362-sigma2)
        wavec = wavec*fact  

    #tested and working
    #fl=open('test.txt','w') 
    #for rr in range(len(wavec)):
    #    fl.write("{} {}\n".format(wave_air[rr],wavec[rr]))
    #fl.close()

    #if write, write
    if(write):
        prihdr = fits.Header()
        prihdr['NPIX'] = totpix
        hduflx  = fits.PrimaryHDU(spec_flx,header=prihdr) #total in region
        hduerr  = fits.ImageHDU(spec_err) #associated errors
        hduwav  = fits.ImageHDU(wavec)    #wave
        hdumed  = fits.ImageHDU(spec_med) #median spectrum 
        if(twod): #twod 
            hdu2flx  = fits.ImageHDU(twodspec)
            hdu2err  = fits.ImageHDU(twoderr)
            hduimg   = fits.ImageHDU(twodimg)
            hdulist = fits.HDUList([hduflx,hduerr,hduwav,hdumed,hdu2flx,hdu2err,hduimg])
        else:
            hdulist = fits.HDUList([hduflx,hduerr,hduwav,hdumed])
        hdulist.writeto(write, overwrite=True)

    return wavec, spec_flx, spec_err, spec_med
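
A minimal usage sketch (file names hypothetical): extract a box spectrum of half-width 5 pixels at (x, y) = (120, 85), convert to vacuum wavelengths, and write the result.

wave, flux, err, med = cube2spec('datacube.fits', 120, 85, 5,
                                 write='spec1d.fits', shape='box', tovac=True)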
Exemplo n.º 24
    def write(self, fname, overwrite=False, xgap=200, ygap=200):
        """Writes out the MCCD to a FITS file.

        Arguments::

            fname : string or file-like object
                Name of file to write to. Can also be a file, opened in
                writeable binary mode.

            overwrite : bool
                True to overwrite pre-existing files

            xgap  : int
               X-gap used to space CCDs for ds9 mosaicing (unbinned pixels)

            ygap  : int
               Y-gap used to space CCDs for ds9 mosaicing (unbinned pixels)

        """

        phead = self.head.copy()
        phead["NUMCCD"] = (len(self), "Number of CCDs")
        phead["HIPERCAM"] = ("MCCD", "Type of HiPERCAM data (CCD | MCCD)")

        # Add comments if not already present.
        comm1 = "Data representing multiple CCDs written by hipercam.MCCD.write."
        if "COMMENT" not in phead or str(phead["COMMENT"]).find(comm1) == -1:
            phead.add_comment(comm1)
            phead.add_comment(
                "Each window of each CCD is written in a series of HDUs following an"
            )
            phead.add_comment(
                "HDU containing only the header. These follow an overall header for the"
            )
            phead.add_comment(
                "MCCD containing top-level information. The headers of the data window"
            )
            phead.add_comment(
                "HDUs have keywords LLX, LLY giving the pixel location in unbinned"
            )
            phead.add_comment(
                "pixels and XBIN and YBIN for their binning factors. The total unbinned"
            )
            phead.add_comment(
                "dimensions of each CCD are stored under keywords NXTOT and NYTOT for"
            )
            phead.add_comment(
                "each CCD. Each HDU associated with a given CCD is labelled with a"
            )
            phead.add_comment("header keyword CCD.")

        # make the first HDU
        hdul = fits.HDUList()
        hdul.append(fits.PrimaryHDU(header=fits.Header(phead.cards)))

        # add in the HDUs of all the CCDs NX = 3 specific to HiPERCAM but
        # should do reasonably generally I think.
        xoff, yoff, noff = 0, 0, 0
        NX = 3
        for cnam, ccd in self.items():
            ccd.whdul(hdul, cnam, xoff, yoff)
            noff += 1
            if noff % NX == 0:
                xoff = 0
                yoff -= (ccd.nytot + 2 * ccd.nypad) + ygap
            else:
                xoff += (ccd.nxtot + 2 * ccd.nxpad) + xgap
        hdul.writeto(fname, overwrite=overwrite)
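
Usage sketch, assuming mccd is an existing hipercam.MCCD instance (argument values illustrative):

mccd.write('run0012.fits', overwrite=True, xgap=100, ygap=100)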
Exemplo n.º 25
def writeSingleFITS(data, wcs, output, template, clobber=True, verbose=True):
    """ Write out a simple FITS file given a numpy array and the name of another
    FITS file to use as a template for the output image header.
    """
    outname, outextn = fileutil.parseFilename(output)
    outextname, outextver = fileutil.parseExtn(outextn)

    if fileutil.findFile(outname):
        if clobber:
            log.info('Deleting previous output product: %s' % outname)
            fileutil.removeFile(outname)

        else:
            log.warning('Output file %s already exists and overwrite not '
                        'specified!' % outname)
            log.error('Quitting... Please remove before resuming operations.')
            raise IOError

    # Now update WCS keywords with values from provided WCS
    if hasattr(wcs.sip, 'a_order'):
        siphdr = True
    else:
        siphdr = False
    wcshdr = wcs.wcs2header(sip2hdr=siphdr)

    if template is not None:
        # Get default headers from multi-extension FITS file
        # If input data is not in MEF FITS format, it will return 'None'
        # NOTE: These are HEADER objects, not HDUs
        (prihdr, scihdr, errhdr,
         dqhdr), newtab = getTemplates(template, EXTLIST)

        if scihdr is None:
            scihdr = fits.Header()
            indx = 0
            for c in prihdr.cards:
                if c.keyword not in ['INHERIT', 'EXPNAME']: indx += 1
                else: break
            for i in range(indx, len(prihdr)):
                scihdr.append(prihdr.cards[i])
            for i in range(indx, len(prihdr)):
                del prihdr[indx]
    else:
        scihdr = fits.Header()
        prihdr = fits.Header()
        # Start by updating PRIMARY header keywords...
        prihdr.set('EXTEND', value=True, after='NAXIS')
        prihdr['FILENAME'] = outname

    if outextname == '':
        outextname = 'sci'
    if outextver == 0: outextver = 1
    scihdr['EXTNAME'] = outextname.upper()
    scihdr['EXTVER'] = outextver

    for card in wcshdr.cards:
        scihdr[card.keyword] = (card.value, card.comment)

    # Create PyFITS HDUList for all extensions
    outhdu = fits.HDUList()
    # Setup primary header as an HDU ready for appending to output FITS file
    prihdu = fits.PrimaryHDU(header=prihdr)
    scihdu = fits.ImageHDU(header=scihdr, data=data)

    outhdu.append(prihdu)
    outhdu.append(scihdu)
    outhdu.writeto(outname)

    if verbose:
        print('Created output image: %s' % outname)
Exemplo n.º 26
def parse(file, psfTable, outFolder, pixelPlane):
    '''
    a parser for G4output.
    It's easier to parallelize if written as a separate function.
    '''
    # initialization
    lineNum = 0
    nTrk = 0  # count number of tracks
    # prepare files
    print('parsing #', file)
    f = open(file, 'r')
    lines = f.readlines()
    fileIdx = re.split(r'\.', file)[0]
    x_tmp = []
    y_tmp = []
    z_tmp = []
    dE_tmp = []
    xDir = -1
    yDir = -1

    if os.path.isfile('%s/%s.fits' % (outFolder, fileIdx)):
        return

    for line in lines:
        if line[0] == '*' and x_tmp:
            print('see * in', file)
        elif 'physiTracker' not in line:
            continue
        elif 'physiTracker' in line:
            lineSplit = re.split(r'\s*[(), \s]\s*', line)
            if lineSplit[0] == '':
                lineSplit = lineSplit[1:]
            if not x_tmp:
                xDir = float(lineSplit[7])
                yDir = float(lineSplit[8])
                xInit = float(lineSplit[0])
                yInit = float(lineSplit[1])
                eInit = float(lineSplit[5])
            x_tmp.append(float(lineSplit[0]))
            y_tmp.append(float(lineSplit[1]))
            z_tmp.append(float(lineSplit[2]))
            dE_tmp.append(float(lineSplit[6]))
    else:
        # executed after 'for' terminates normally
        # when 'for' terminates normally, it reaches the end of file
        if x_tmp:
            x = np.array(x_tmp)
            y = np.array(y_tmp) - 2000.
            z = np.array(z_tmp)
            dE = np.array(dE_tmp)
            alpha_true = np.rad2deg(np.arctan(xDir / yDir))
            if alpha_true < 0:
                alpha_true += 360.
            del x_tmp, y_tmp, z_tmp, dE_tmp
            track, row0_pos_um, col0_pos_um = XYZdE2track(
                x, y, z, dE, psfTable, pixelPlane)
            h = fits.Header()
            h['row0_um'] = row0_pos_um
            h['col0_um'] = col0_pos_um
            h['alphaT'] = alpha_true
            h['xInit'] = xInit
            h['yInit'] = yInit
            h['eInit'] = eInit
            prihdu = fits.PrimaryHDU(track, header=h)
            row_pix = (y - row0_pos_um) / 10.5
            col_pix = (x - col0_pos_um) / 10.5
            row = fits.Column(name='row', format='F', array=row_pix)
            col = fits.Column(name='col', format='F', array=col_pix)
            tbhdu = fits.BinTableHDU.from_columns(fits.ColDefs([row, col]))
            hdulist = fits.HDUList([prihdu, tbhdu])
            hdulist.writeto('%s/%s.fits' % (outFolder, fileIdx))
        f.close()
Exemplo n.º 27
                      unpack=True,
                      dtype='float',
                      skiprows=1)

    # Rebinando de modo a ter um passo constante, pequeno o suficiente para nao perder informacoes
    rebin = interpolate.interp1d(l, f)
    # calculando o tamanho do passo no inicio do espectro e pegando soh 10%
    binsize = 0.1 * (l[1] - l[0])
    # criando um array com os novos comprimentos de onda (rebinados)
    n_l = np.arange(l[0], l[-1], binsize)
    # Interpolando os antigos para poder calcular os novos valores
    n_f = rebin(n_l) * NormalizationFactor
    # criando a lista de HDU (para fazer o fits)
    savefile = pf.HDUList()
    # criando a lista de header (para fazer o fits)
    hdr = pf.Header()
    # Adicionando keywords aos header
    hdr.append(('OBJECT', sourceName.split('.')[0]))
    hdr.append(('CTYPE1', 'LINEAR'))
    #  hdr.append(('CRPIX1',float(n_l[0])))
    hdr.append(('CRVAL1', float(n_l[0])))
    hdr.append(('CD1_1', float(binsize)))
    #hdr.append(('WAT1_001', 'wtype=linear axtype=wave' ))
    # Salvando os valores de fluxo no HDU
    savefile.append(pf.ImageHDU(data=n_f, header=hdr))
    # grando o HDU em fits
    savefile.writeto(sourceName.split('.')[0] + '.fits', overwrite=True)
    if savetxt:

        save = np.column_stack((n_l, n_f))
        np.savetxt(sourceName.split('.')[0] + '.txt',
Exemplo n.º 28
for i in range(5, 10):
    h = fits.open('trnslos_20180414_%i.fits' % i)
    trnslos[(i - 5) * 1000:(i - 4) * 1000] = h[0].data
    h.close()
'''open training slopes'''
h = fits.open('trnslos_20180405.fits')
trnslos_all = h[0].data
h.close()
'''remove mean from raw x and y slopes'''
trnslos = trnslos.reshape((5000 * 100 * 2, 36))
nor_trnslos = np.empty((1000000, 36))
for i in range(1000000):
    tmp = trnslos[i].copy()
    nor_trnslos[i] = tmp - tmp.mean()
nor_trnslos = nor_trnslos.reshape((5000, 7200))
header = fits.Header()
header["r_0"] = str([0.16])
header["WINDSPD"] = str([5, 6, 7, 8, 9])
header["WINDDIR"] = str([0])
header["SAVETIME"] = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
header["ITERS"] = str([1000])
header['ITERTIME'] = str([0.012])
fits.writeto('trnslos_20180416.fits', nor_trnslos, header)
'''multi sets augmentation'''
n_frame = 100
trnslos = np.zeros((15000, 72 * n_frame))
h = fits.open('trnslos_20180414.fits')
tmp = h[0].data
h.close()
for i in range(0, 15, 3):
    j = i // 3
Exemplo n.º 29
def _make_image_header(
    nxpix=100,
    nypix=100,
    binsz=0.1,
    xref=0,
    yref=0,
    proj="CAR",
    coordsys="GAL",
    xrefpix=None,
    yrefpix=None,
):
    """Generate a FITS header from scratch.

    Uses the same parameter names as the Fermi tool gtbin.

    If no reference pixel position is given it is assumed to be
    at the center of the image.

    Parameters
    ----------
    nxpix : int, optional
        Number of pixels in x axis. Default is 100.
    nypix : int, optional
        Number of pixels in y axis. Default is 100.
    binsz : float, optional
        Bin size for x and y axes in units of degrees. Default is 0.1.
    xref : float, optional
        Coordinate system value at reference pixel for x axis. Default is 0.
    yref : float, optional
        Coordinate system value at reference pixel for y axis. Default is 0.
    proj : string, optional
        Projection type. Default is 'CAR' (cartesian).
    coordsys : {'CEL', 'GAL'}, optional
        Coordinate system. Default is 'GAL' (Galactic).
    xrefpix : float, optional
        Coordinate system reference pixel for x axis. Default is None.
    yrefpix: float, optional
        Coordinate system reference pixel for y axis. Default is None.

    Returns
    -------
    header : `~astropy.io.fits.Header`
        Header
    """
    nxpix = int(nxpix)
    nypix = int(nypix)
    if not xrefpix:
        xrefpix = (nxpix + 1) / 2.
    if not yrefpix:
        yrefpix = (nypix + 1) / 2.

    if coordsys == "CEL":
        ctype1, ctype2 = "RA---", "DEC--"
    elif coordsys == "GAL":
        ctype1, ctype2 = "GLON-", "GLAT-"
    else:
        raise ValueError("Unsupported coordsys: {!r}".format(coordsys))

    pars = {
        "NAXIS": 2,
        "NAXIS1": nxpix,
        "NAXIS2": nypix,
        "CTYPE1": ctype1 + proj,
        "CRVAL1": xref,
        "CRPIX1": xrefpix,
        "CUNIT1": "deg",
        "CDELT1": -binsz,
        "CTYPE2": ctype2 + proj,
        "CRVAL2": yref,
        "CRPIX2": yrefpix,
        "CUNIT2": "deg",
        "CDELT2": binsz,
    }

    header = fits.Header()
    header.update(pars)

    return header
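
A quick usage sketch: build a Galactic CAR header with the defaults above and check the reference pixel through astropy WCS (the reference pixel should map back to the reference coordinate).

from astropy.wcs import WCS

hdr = _make_image_header(nxpix=200, nypix=100, binsz=0.5)
w = WCS(hdr)
print(w.wcs_pix2world([[100.5, 50.5]], 1))   # reference pixel maps to (0, 0)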
Exemplo n.º 30
def split_file(N_side, original_filename, file_number, save_location,
               output_format):
    #Open the original .fits file, and extract the data we need.
    print('Opening ', original_filename, ' ...')
    initial = fits.open(original_filename)

    #NORMALISE THE DELTA FIELD SO THAT ITS MEAN IS 0.
    #THIS IS AN ONGOING ISSUE IN COLORE, AND IS LISTED TO BE FIXED SO THAT THE DELTA MEAN IS 0 AUTOMATICALLY.
    #initial = stats.normalise_delta(initial)

    RA = initial[1].data['RA']
    DEC = initial[1].data['DEC']
    z_qso = initial[1].data['Z_COSMO']
    z = initial[4].data['Z']
    DELTA = initial[2].data[:]
    initial.close()

    N_cells = z.shape[0]
    N_qso = z_qso.shape[0]
    N_pix = 12 * N_side**2

    iv = np.ones((N_qso, N_cells))
    PLATE = np.zeros(N_qso, dtype=int)
    MJD = np.zeros(N_qso, dtype=int)
    FIBER = np.zeros(N_qso, dtype=int)

    #Set THING_ID as a 10 digit string, of which the first 3 digits correspond to the node number, and the last 7 correspond to the row number in the original file.
    THING_ID = [''] * N_qso
    node = str(file_number)
    if len(node) <= 3:
        node = '0' * (3 - len(node)) + node
    else:
        exit(
            'The node number is too great to construct a unique THING_ID (more than 3 digits).'
        )

    row_numbers = list(range(N_qso))
    for i in range(len(row_numbers)):
        row_numbers[i] = str(row_numbers[i])
        if len(row_numbers[i]) <= 7:
            row_numbers[i] = '0' * (7 - len(row_numbers[i])) + row_numbers[i]
        else:
            exit(
                'The row number is too great to construct a unique THING_ID (more than 7 digits).'
            )
        THING_ID[i] = node + row_numbers[i]

    #Set up the LOGLAM_MAP
    lya = 1215.67
    LOGLAM_MAP = np.log10(lya * (1 + z))

    #Convert the coordinates into new pixel identifier numbers, according to the N_side specified.
    pixel_ID = np.zeros([1, len(RA)])

    #Convert DEC and RA in degrees to theta and phi in radians.
    theta = (np.pi / 180.0) * (90.0 - DEC)
    phi = (np.pi / 180.0) * RA

    #Make a list of the HEALPix pixel coordinate of each quasar.
    for i in range(len(RA)):
        #Can we just import healpy on cori? May need to install
        #Check that the angular coordinates are valid. Put all objects with invalid coordinates into a non-realistic ID number (-1).
        if 0 <= theta[i] <= np.pi and 0 <= phi[i] <= 2 * np.pi:
            pixel_ID[0, i] = hp.pixelfunc.ang2pix(N_side, theta[i], phi[i])
        else:
            pixel_ID[0, i] = int(node) - 12 * N_side**2

    #print('There are %d objects with invalid angular coordinates.' % (sum(pixel_ID[i] == -1 for i in range(len(pixel_ID)))))
    print(
        'Details of these objects are stored in a file corresponding to a pixel number of -(node number).'
    )

    #Set up a pixel_ID list to map between objects and their pixel.
    pixel_ID = pixel_ID.reshape(-1)

    #Set up a list of pixels represented in the original .fits file.
    pixel_list = list(np.sort(list(set(pixel_ID))))

    #For each pixel represented in the original .fits file, make a new file.
    for n in pixel_list:

        #Progress check aide.
        if n + 1 > 0:
            print('Working on pixel %d (N_pix = %d)' % (n, N_pix))
        else:
            print(
                'Working on set of objects with invalid angular coordinates.')

        pixel_indices = [i for i in range(len(pixel_ID)) if pixel_ID[i] == n]

        pixel_DELTA = np.array([DELTA[i, :] for i in pixel_indices])
        pixel_iv = np.array([iv[i, :] for i in pixel_indices])
        pixel_RA = [RA[i] for i in pixel_indices]
        pixel_DEC = [DEC[i] for i in pixel_indices]
        pixel_z_qso = [z_qso[i] for i in pixel_indices]
        pixel_PLATE = [PLATE[i] for i in pixel_indices]
        pixel_MJD = [MJD[i] for i in pixel_indices]
        pixel_FIBER = [FIBER[i] for i in pixel_indices]
        pixel_THING_ID = [THING_ID[i] for i in pixel_indices]

        #Transpose pixel_DELTA and pixel_iv to match picca input.
        pixel_DELTA = np.transpose(pixel_DELTA)
        pixel_iv = np.transpose(pixel_iv)

        if n >= 0:
            print('There are %d quasars in pixel %d.' %
                  (len(pixel_THING_ID), n))
        else:
            print('There are %d quasars with invalid angular coordinates.' %
                  (len(pixel_THING_ID)))

        if len(pixel_THING_ID) != 0:

            if output_format == 0:
                #Make output for from fitsio
                print('Not written yet!')

            elif output_format == 1:
                #Construct a table for the final hdu.
                col_RA = fits.Column(name='RA', array=pixel_RA, format='E')
                col_DEC = fits.Column(name='DEC', array=pixel_DEC, format='E')
                col_z_qso = fits.Column(name='Z',
                                        array=pixel_z_qso,
                                        format='E')
                col_PLATE = fits.Column(name='PLATE',
                                        array=pixel_PLATE,
                                        format='E')
                col_MJD = fits.Column(name='MJD', array=pixel_MJD, format='E')
                col_FIBER = fits.Column(name='FIBER',
                                        array=pixel_FIBER,
                                        format='E')
                col_THING_ID = fits.Column(name='THING_ID',
                                           array=pixel_THING_ID,
                                           format='10A')

                cols = fits.ColDefs([
                    col_RA, col_DEC, col_z_qso, col_PLATE, col_MJD, col_FIBER,
                    col_THING_ID
                ])

                #Add a couple of headers to the file.
                header = fits.Header()
                header['NSIDE'] = N_side
                header['NQSO'] = len(pixel_THING_ID)
                header['PIX'] = int(n)
                header['LYA'] = lya

                #Create hdus from the data arrays
                hdu_DELTA = fits.PrimaryHDU(data=pixel_DELTA, header=header)
                hdu_iv = fits.ImageHDU(data=pixel_iv, header=header, name='IV')
                hdu_LOGLAM_MAP = fits.ImageHDU(data=LOGLAM_MAP,
                                               header=header,
                                               name='LOGLAM_MAP')
                tbhdu = fits.BinTableHDU.from_columns(cols, header=header)

                hdulist = fits.HDUList(
                    [hdu_DELTA, hdu_iv, hdu_LOGLAM_MAP, tbhdu])

                if n >= 0:
                    new_filename = save_location + '/' + 'node_%s_nside_%d_pix_%d.fits' % (
                        node, N_side, n)
                else:
                    new_filename = save_location + '/' + 'invalid_coords_node_%s_nside_%d.fits' % (
                        node, N_side)

                hdulist.writeto(new_filename)

            else:
                #Some kind of error and option to put in a new format code?
                print('Try another format!')

        else:
            print('No objects in pixel %d.' % n)

    return THING_ID, pixel_ID
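
The DEC/RA to HEALPix conversion used above, as one self-contained sketch (healpy assumed installed; coordinates illustrative):

import numpy as np
import healpy as hp

ra, dec = 150.1, 2.2                          # degrees
theta = np.radians(90.0 - dec)                # colatitude in radians
phi = np.radians(ra)
print(hp.pixelfunc.ang2pix(16, theta, phi))   # HEALPix pixel for N_side=16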