Example #1
import sys
from pprint import pprint

import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from scipy.ndimage import map_coordinates

# Cube2Im and loadfits are project-local helpers assumed to be importable.


def gridding(arg1,
             imagefile_2,
             fileout=False,
             fullWCS=True,
             ReturnHDU=False,
             ReturnHDUList=False,
             order=1,
             Verbose=False):
    """
    Interpolates Using ndimage and astropy.wcs for coordinate system.
    arg1 is the input data to be gridded, can be either a fits filename or an HDU
    arg2 contains the reference hdr, can either be a fits filename or a header
    """
    if (ReturnHDUList):
        ReturnHDU = True

    if (isinstance(arg1, str)):
        im1, hdr1 = loadfits(arg1)
    elif (isinstance(arg1, fits.hdu.image.PrimaryHDU)):
        im1 = arg1.data
        hdr1 = arg1.header
    elif (isinstance(arg1, fits.hdu.hdulist.HDUList)):
        im1 = arg1[0].data
        hdr1 = arg1[0].header
    else:
        sys.exit("not a recognized input format")

    IsCube = False
    if (len(im1.shape) > 2):
        if (len(im1.shape) > 3):
            # drop a degenerate leading (e.g. Stokes) axis
            im1 = im1[0, :, :, :]
        if (im1.shape[0] > 1):  # axis 0 is the spectral axis of a FITS cube
            IsCube = True
        else:
            im1 = im1[0, :, :]

    if (isinstance(imagefile_2, str)):
        im2, hdr2 = loadfits(imagefile_2)
    else:
        hdr2 = imagefile_2

    SetCRVAL3_1 = False

    if ('CRVAL3' in hdr1.keys()):
        SetCRVAL3_1 = True
        hdr1_CRVAL3 = hdr1['CRVAL3']
    SetCRVAL3_2 = False
    if ('CRVAL3' in hdr2.keys()):
        SetCRVAL3_2 = True
        hdr2_CRVAL3 = hdr2['CRVAL3']

    #hdr1.pop('CRVAL3', None)
    #hdr2.pop('CRVAL3', None)

    hdr1im = Cube2Im.trimhead(hdr1, Inplace=False)
    hdr2im = Cube2Im.trimhead(hdr2, Inplace=False)

    if Verbose:
        pprint(hdr2im)

    w1 = WCS(hdr1im)
    w2 = WCS(hdr2im)

    n2x = hdr2['NAXIS1']
    n2y = hdr2['NAXIS2']
    k2s = np.arange(0, n2x)
    l2s = np.arange(0, n2y)
    kk2s, ll2s = np.meshgrid(k2s, l2s)

    if (fullWCS):
        xxs2wcs, yys2wcs = w2.all_pix2world(kk2s, ll2s, 0)
        kk1s, ll1s = w1.all_world2pix(xxs2wcs, yys2wcs, 0, tolerance=1e-12)
    else:
        xxs2wcs, yys2wcs = w2.wcs_pix2world(kk2s, ll2s, 0)
        kk1s, ll1s = w1.wcs_world2pix(xxs2wcs, yys2wcs, 0)

    im1 = np.nan_to_num(im1)

    if IsCube:
        print("im1.shape", im1.shape)
        nk = im1.shape[0]
        resamp = np.zeros((nk, n2y, n2x))
        print("Resampling ... nk=", nk)
        for k in list(range(nk)):
            print(" k= ", k)
            aplane = im1[k, :, :]
            resamp_aplane = map_coordinates(aplane, [ll1s, kk1s],
                                            prefilter=False,
                                            order=order)  #,order=1
            resamp[k, :, :] = resamp_aplane

    else:
        resamp = map_coordinates(im1, [ll1s, kk1s],
                                 prefilter=False,
                                 order=order)  #,order=1

    resamp = np.nan_to_num(resamp)

    #if (SetCRVAL3_1):
    #    hdr1['CRVAL3']=hdr1_CRVAL3
    if (SetCRVAL3_2):
        hdr2['CRVAL3'] = hdr2_CRVAL3

    if IsCube:
        hdr2['CRVAL3'] = hdr1['CRVAL3']
        hdr2['NAXIS3'] = hdr1['NAXIS3']
        hdr2['CRPIX3'] = hdr1['CRPIX3']
        hdr2['CDELT3'] = hdr1['CDELT3']

    if (fileout):

        fits.writeto(fileout, resamp, hdr2, overwrite=True)

    if ReturnHDU:
        rethdu = fits.PrimaryHDU()
        rethdu.data = resamp
        rethdu.header = hdr2
        if ReturnHDUList:
            hdul = fits.HDUList([rethdu])
            return hdul
        else:
            return rethdu
    else:
        return resamp
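
A minimal usage sketch for gridding above (file names are illustrative; the project-local helpers loadfits and Cube2Im must be on the path):

regridded = gridding('input_map.fits', 'reference.fits',
                     fileout='input_map_regrid.fits')
hdu = gridding('input_map.fits', 'reference.fits', ReturnHDU=True)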
Example #2
    def make_hdus(self):
        return [
            pyfits.PrimaryHDU(header=None),  # primary
            self.make_table(),  # this table
        ]
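
A one-line usage sketch (assuming obj is an instance of the class defining this method):

hdulist = pyfits.HDUList(obj.make_hdus())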
Example #3
            vars()['x' + el + '_amb'][i] = np.asarray(vars()['x' + el +
                                                             '_amb'][i])
            vars()['z' + el + '_aver_amb'][i] = np.asarray(
                vars()['z' + el + '_aver_amb'][i])

        vars()['x' + el + '_amb'] = np.asarray(vars()['x' + el + '_amb'])
        vars()['z' + el + '_aver_amb'] = np.asarray(vars()['z' + el +
                                                           '_aver_amb'])

    # Record final FITS table with model parameters for every mass coordinate
    # Each sub-table will contain information for a given age

    prihdr = fits.Header()
    prihdr['DATE'] = time.strftime("%m/%d/%Y")
    prihdr['COMMENT'] = ('= Physical profile of a modeled SNR for several '
                         'expansion ages. Includes shocked ejecta and '
                         'ambient medium')
    prihdu = fits.PrimaryHDU(header=prihdr)

    cols = [0] * len(data_vs_rad_age)
    tbhdu = [0] * (len(data_vs_rad_age) + 1)  # Header + cols
    tbhdu[0] = prihdu

    for ag, datag in enumerate(data_vs_rad_age):

        cols.append([0.])

        r_conc = np.concatenate((r[ag] / pc_to_cm, r_amb[ag] / pc_to_cm),
                                axis=0)
        r_Table = fits.Column(name='r', format='1E', unit='pc', array=r_conc)

        lagm_conc = np.concatenate((lagm[ag], lagm[ag][-1] + lagm_amb[ag]),
                                   axis=0)
Example #4
    def writeFITS(self,
                  template,
                  sciarr,
                  whtarr,
                  ctxarr=None,
                  versions=None,
                  overwrite=yes,
                  blend=True,
                  virtual=False):
        """
        Generate PyFITS objects for each output extension
        using the file given by 'template' for populating
        headers.

        The arrays will have the size specified by 'shape'.
        """
        if not isinstance(template, list):
            template = [template]

        if fileutil.findFile(self.output):
            if overwrite:
                log.info('Deleting previous output product: %s' % self.output)
                fileutil.removeFile(self.output)

            else:
                log.warning('Output file %s already exists and overwrite not '
                            'specified!' % self.output)
                log.error('Quitting... Please remove before resuming '
                          'operations.')
                raise IOError

        # initialize output value for this method
        outputFITS = {}
        # Default value for NEXTEND when 'build'== True
        nextend = 3
        if not self.build:
            nextend = 0
            if self.outweight:
                if overwrite:
                    if fileutil.findFile(self.outweight):
                        log.info('Deleting previous output WHT product: %s' %
                                 self.outweight)
                    fileutil.removeFile(self.outweight)
                else:
                    log.warning('Output file %s already exists and overwrite '
                                'not specified!' % self.outweight)
                    log.error('Quitting... Please remove before resuming '
                              'operations.')
                    raise IOError

            if self.outcontext:
                if overwrite:
                    if fileutil.findFile(self.outcontext):
                        log.info('Deleting previous output CTX product: %s' %
                                 self.outcontext)
                    fileutil.removeFile(self.outcontext)
                else:
                    log.warning('Output file %s already exists and overwrite '
                                'not specified!' % self.outcontext)
                    log.error('Quitting... Please remove before resuming '
                              'operations.')
                    raise IOError

        # Get default headers from multi-extension FITS file
        # If only writing out single drizzle product, blending needs to be
        # forced off as there is only 1 input to report, no blending needed
        if self.single:
            blend = False

        # If input data is not in MEF FITS format, it will return 'None'
        # and those headers will have to be generated from drizzle output
        # file FITS headers.
        # NOTE: These are HEADER objects, not HDUs
        #prihdr,scihdr,errhdr,dqhdr = getTemplates(template)
        self.fullhdrs, intab = getTemplates(template, blend=False)

        newhdrs, newtab = getTemplates(template, blend=blend)
        if newtab is not None: nextend += 1  # account for new table extn

        prihdr = newhdrs[0]
        scihdr = newhdrs[1]
        errhdr = newhdrs[2]
        dqhdr = newhdrs[3]

        # Setup primary header as an HDU ready for appending to output FITS file
        prihdu = fits.PrimaryHDU(header=prihdr, data=None)

        # Start by updating PRIMARY header keywords...
        prihdu.header.set('EXTEND', value=True, after='NAXIS')
        prihdu.header['NEXTEND'] = nextend
        prihdu.header['FILENAME'] = self.output
        prihdu.header['PROD_VER'] = 'DrizzlePac {}'.format(version.__version__)

        # Update the ROOTNAME with the new value as well
        _indx = self.output.find('_drz')
        if _indx < 0:
            rootname_val = self.output
        else:
            rootname_val = self.output[:_indx]
        prihdu.header['ROOTNAME'] = rootname_val

        # Get the total exposure time for the image
        # If not calculated by PyDrizzle and passed through
        # the pardict, then leave value from the template image.
        if self.texptime:
            prihdu.header['EXPTIME'] = self.texptime
            prihdu.header.set('TEXPTIME', value=self.texptime, after='EXPTIME')
            prihdu.header['EXPSTART'] = self.expstart
            prihdu.header['EXPEND'] = self.expend

        #Update ASN_MTYP to reflect the fact that this is a product
        # Currently hard-wired to always output 'PROD-DTH' as MTYPE
        prihdu.header['ASN_MTYP'] = 'PROD-DTH'

        # Update DITHCORR calibration keyword if present
        # Remove when we can modify FITS headers in place...
        if 'DRIZCORR' in prihdu.header:
            prihdu.header['DRIZCORR'] = 'COMPLETE'
        if 'DITHCORR' in prihdu.header:
            prihdu.header['DITHCORR'] = 'COMPLETE'

        prihdu.header['NDRIZIM'] = (len(self.parlist),
                                    'Drizzle, No. images drizzled onto output')

        # Only a subset of these keywords makes sense for the new WCS based
        # transformations. They need to be reviewed to decide what to keep
        # and what to leave out.
        if not self.blot:
            self.addDrizKeywords(prihdu.header, versions)

        if scihdr:
            try:
                del scihdr['OBJECT']
            except KeyError:
                pass

            if 'CCDCHIP' in scihdr: scihdr['CCDCHIP'] = '-999'
            if 'NCOMBINE' in scihdr:
                scihdr['NCOMBINE'] = self.parlist[0]['nimages']

            # Find where to insert/update the BUNIT keyword
            bunit_last_kw = self.find_kwupdate_location(scihdr, 'bunit')
            if self.bunit is not None:
                comment_str = "Units of science product"
                if self.bunit.lower()[:5] == 'count':
                    comment_str = "counts * gain = electrons"
                scihdr.set('BUNIT',
                           value=self.bunit,
                           comment=comment_str,
                           after=bunit_last_kw)
            else:
                # check to see whether to update already present BUNIT comment
                if 'bunit' in scihdr and scihdr['bunit'].lower(
                )[:5] == 'count':
                    comment_str = "counts * gain = electrons"
                    scihdr.set('BUNIT',
                               value=scihdr['bunit'],
                               comment=comment_str,
                               after=bunit_last_kw)

            # Add WCS keywords to SCI header
            if self.wcs:
                pre_wcs_kw = self.find_kwupdate_location(scihdr, 'CD1_1')
                addWCSKeywords(self.wcs,
                               scihdr,
                               blot=self.blot,
                               single=self.single,
                               after=pre_wcs_kw)
                # Recompute this after removing distortion kws
                pre_wcs_kw = self.find_kwupdate_location(scihdr, 'CD1_1')

        ##########
        # Now, build the output file
        ##########
        if self.build:
            print('-Generating multi-extension output file: ', self.output)
            fo = fits.HDUList()

            # Add primary header to output file...
            fo.append(prihdu)

            if self.single and self.compress:
                hdu = fits.CompImageHDU(data=sciarr,
                                        header=scihdr,
                                        name=EXTLIST[0])
            else:
                hdu = fits.ImageHDU(data=sciarr,
                                    header=scihdr,
                                    name=EXTLIST[0])
            last_kw = self.find_kwupdate_location(scihdr, 'EXTNAME')
            hdu.header.set('EXTNAME', value='SCI', after=last_kw)
            hdu.header.set('EXTVER', value=1, after='EXTNAME')
            fo.append(hdu)

            # Build WHT extension here, if requested...
            if errhdr:
                errhdr['CCDCHIP'] = '-999'

            if self.single and self.compress:
                hdu = fits.CompImageHDU(data=whtarr,
                                        header=errhdr,
                                        name=EXTLIST[1])
            else:
                hdu = fits.ImageHDU(data=whtarr,
                                    header=errhdr,
                                    name=EXTLIST[1])
            last_kw = self.find_kwupdate_location(errhdr, 'EXTNAME')
            hdu.header.set('EXTNAME', value='WHT', after=last_kw)
            hdu.header.set('EXTVER', value=1, after='EXTNAME')
            if self.wcs:
                pre_wcs_kw = self.find_kwupdate_location(hdu.header, 'CD1_1')
                # Update WCS Keywords based on PyDrizzle product's value
                # since 'drizzle' itself doesn't update that keyword.
                addWCSKeywords(self.wcs,
                               hdu.header,
                               blot=self.blot,
                               single=self.single,
                               after=pre_wcs_kw)
            fo.append(hdu)

            # Build CTX extension here
            # If there is only 1 plane, write it out as a 2-D extension
            if self.outcontext:
                if ctxarr.shape[0] == 1:
                    _ctxarr = ctxarr[0]
                else:
                    _ctxarr = ctxarr
            else:
                _ctxarr = None

            if self.single and self.compress:
                hdu = fits.CompImageHDU(data=_ctxarr,
                                        header=dqhdr,
                                        name=EXTLIST[2])
            else:
                hdu = fits.ImageHDU(data=_ctxarr,
                                    header=dqhdr,
                                    name=EXTLIST[2])
            last_kw = self.find_kwupdate_location(dqhdr, 'EXTNAME')
            hdu.header.set('EXTNAME', value='CTX', after=last_kw)
            hdu.header.set('EXTVER', value=1, after='EXTNAME')

            if self.wcs:
                pre_wcs_kw = self.find_kwupdate_location(hdu.header, 'CD1_1')
                # Update WCS Keywords based on PyDrizzle product's value
                # since 'drizzle' itself doesn't update that keyword.
                addWCSKeywords(self.wcs,
                               hdu.header,
                               blot=self.blot,
                               single=self.single,
                               after=pre_wcs_kw)
            fo.append(hdu)

            # remove all alternate WCS solutions from headers of this product
            wcs_functions.removeAllAltWCS(fo, [1])

            # add table of combined header keyword values to FITS file
            if newtab is not None:
                fo.append(newtab)

            if not virtual:
                print('Writing out to disk:', self.output)
                # write out file to disk
                fo.writeto(self.output)
                fo.close()
                del fo, hdu
                fo = None
            # End 'if not virtual'
            outputFITS[self.output] = fo

        else:
            print('-Generating simple FITS output: %s' % self.outdata)

            fo = fits.HDUList()
            hdu_header = prihdu.header.copy()
            del hdu_header['nextend']

            # Append remaining unique header keywords from template DQ
            # header to Primary header...
            if scihdr:
                for _card in scihdr.cards:
                    if _card.keyword not in RESERVED_KEYS and _card.keyword not in hdu_header:
                        hdu_header.append(_card)
        for kw in ['PCOUNT', 'GCOUNT']:
            try:
                # delete the header keyword itself, not the loop variable
                del hdu_header[kw]
            except KeyError:
                pass
            hdu_header['filename'] = self.outdata

            if self.compress:
                hdu = fits.CompImageHDU(data=sciarr, header=hdu_header)
                wcs_ext = [1]
            else:
                hdu = fits.ImageHDU(data=sciarr, header=hdu_header)
                wcs_ext = [0]

            # explicitly set EXTEND to FALSE for simple FITS files.
            dim = len(sciarr.shape)
            hdu.header.set('extend', value=False, after='NAXIS%s' % dim)

            # Add primary header to output file...
            fo.append(hdu)

            # remove all alternate WCS solutions from headers of this product
            wcs_functions.removeAllAltWCS(fo, wcs_ext)

            # add table of combined header keyword values to FITS file
            if newtab is not None:
                fo.append(newtab)

            if not virtual:
                print('Writing out image to disk:', self.outdata)
                # write out file to disk
                fo.writeto(self.outdata)
                del fo, hdu
                fo = None
            # End 'if not virtual'
            outputFITS[self.outdata] = fo

            if self.outweight and whtarr is not None:
                # We need to build new PyFITS objects for each WHT array
                fwht = fits.HDUList()

                if errhdr:
                    errhdr['CCDCHIP'] = '-999'

                if self.compress:
                    hdu = fits.CompImageHDU(data=whtarr, header=prihdu.header)
                else:
                    hdu = fits.ImageHDU(data=whtarr, header=prihdu.header)
                # Append remaining unique header keywords from template DQ
                # header to Primary header...
                if errhdr:
                    for _card in errhdr.cards:
                        if _card.keyword not in RESERVED_KEYS and _card.keyword not in hdu.header:
                            hdu.header.append(_card)
                hdu.header['filename'] = self.outweight
                hdu.header['CCDCHIP'] = '-999'
                if self.wcs:
                    pre_wcs_kw = self.find_kwupdate_location(
                        hdu.header, 'CD1_1')
                    # Update WCS Keywords based on PyDrizzle product's value
                    # since 'drizzle' itself doesn't update that keyword.
                    addWCSKeywords(self.wcs,
                                   hdu.header,
                                   blot=self.blot,
                                   single=self.single,
                                   after=pre_wcs_kw)

                # Add primary header to output file...
                fwht.append(hdu)
                # remove all alternate WCS solutions from headers of this product
                wcs_functions.removeAllAltWCS(fwht, wcs_ext)

                if not virtual:
                    print('Writing out image to disk:', self.outweight)
                    fwht.writeto(self.outweight)
                    del fwht, hdu
                    fwht = None
                # End 'if not virtual'
                outputFITS[self.outweight] = fwht

            # If a context image was specified, build a PyFITS object
            # for it as well...
            if self.outcontext and ctxarr is not None:
                fctx = fits.HDUList()

                # If there is only 1 plane, write it out as a 2-D extension
                if ctxarr.shape[0] == 1:
                    _ctxarr = ctxarr[0]
                else:
                    _ctxarr = ctxarr

                if self.compress:
                    hdu = fits.CompImageHDU(data=_ctxarr, header=prihdu.header)
                else:
                    hdu = fits.ImageHDU(data=_ctxarr, header=prihdu.header)
                # Append remaining unique header keywords from template DQ
                # header to Primary header...
                if dqhdr:
                    for _card in dqhdr.cards:
                        if ((_card.keyword not in RESERVED_KEYS)
                                and _card.keyword not in hdu.header):
                            hdu.header.append(_card)
                hdu.header['filename'] = self.outcontext
                if self.wcs:
                    pre_wcs_kw = self.find_kwupdate_location(
                        hdu.header, 'CD1_1')
                    # Update WCS Keywords based on PyDrizzle product's value
                    # since 'drizzle' itself doesn't update that keyword.
                    addWCSKeywords(self.wcs,
                                   hdu.header,
                                   blot=self.blot,
                                   single=self.single,
                                   after=pre_wcs_kw)

                fctx.append(hdu)
                # remove all alternate WCS solutions from headers of this product
                wcs_functions.removeAllAltWCS(fctx, wcs_ext)
                if not virtual:
                    print('Writing out image to disk:', self.outcontext)
                    fctx.writeto(self.outcontext)
                    del fctx, hdu
                    fctx = None
                # End 'if not virtual'

                outputFITS[self.outcontext] = fctx

        return outputFITS
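
A hypothetical call of writeFITS above (assuming out is the OutputImage-like instance that owns this method, and sciarr/whtarr/ctxarr are prepared arrays; all names here are illustrative):

outputFITS = out.writeFITS('input_flt.fits', sciarr, whtarr, ctxarr=ctxarr,
                           versions={'PyDrizzle': 'x.y'})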
Example #5
def array_footprint_to_hdulist(array, footprint, header):
    hdulist = fits.HDUList()
    hdulist.append(fits.PrimaryHDU(array, header))
    hdulist.append(fits.ImageHDU(footprint, header, name='footprint'))
    return hdulist
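
A minimal usage sketch, assuming the (array, footprint) pair comes from something like reproject.reproject_interp, and that target_header is an assumed astropy Header describing the output grid (including its shape):

from astropy.io import fits
from reproject import reproject_interp

hdu = fits.open('input.fits')[0]
array, footprint = reproject_interp(hdu, target_header)
array_footprint_to_hdulist(array, footprint, target_header).writeto('out.fits')
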
import os
import tempfile
import warnings

import numpy as np
import pytest
from astropy import units as u
from astropy import wcs
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.io.fits.verify import VerifyWarning

# KeplerTargetPixelFile and _create_image_array come from the lightkurve
# package and its test suite.


def test_tpf_from_images():
    """Basic tests of tpf.from_fits_images()"""
    # Not without a wcs...
    with pytest.raises(Exception):
        KeplerTargetPixelFile.from_fits_images(_create_image_array(), size=(3, 3),
                                               position=SkyCoord(-234.75, 8.3393, unit='deg'))

    # Make a fake WCS based on astropy.docs...
    w = wcs.WCS(naxis=2)
    w.wcs.crpix = [-234.75, 8.3393]
    w.wcs.cdelt = np.array([-0.066667, 0.066667])
    w.wcs.crval = [0, -90]
    w.wcs.ctype = ["RA---AIR", "DEC--AIR"]
    w.wcs.set_pv([(2, 1, 45.0)])
    pixcrd = np.array([[0, 0], [24, 38], [45, 98]], float)
    header = w.to_header()
    header['CRVAL1P'] = 10
    header['CRVAL2P'] = 20
    ra, dec = 268.21686048, -73.66991904

    # Now this should work.
    images = _create_image_array(header=header)
    tpf = KeplerTargetPixelFile.from_fits_images(images, size=(3, 3),
                                                 position=SkyCoord(ra, dec, unit=(u.deg, u.deg)))
    assert isinstance(tpf, KeplerTargetPixelFile)


    with warnings.catch_warnings():
        # Some cards are too long -- to be investigated.
        warnings.simplefilter("ignore", VerifyWarning)
        # Can we write the output to disk?
        # `delete=False` is necessary below to enable writing to the file on Windows
        # but it means we have to clean up the tmp file ourselves
        tmp = tempfile.NamedTemporaryFile(delete=False)
        try:
            tpf.to_fits(tmp.name)
        finally:
            tmp.close()
            os.remove(tmp.name)

        # Can we read in a list of file names or a list of HDUlists?
        hdus = []
        tmpfile_names = []
        for im in images:
            tmpfile = tempfile.NamedTemporaryFile(delete=False)
            tmpfile_names.append(tmpfile.name)
            hdu = fits.HDUList([fits.PrimaryHDU(), im])
            hdu.writeto(tmpfile.name)
            hdus.append(hdu)

        # Should be able to run with a list of file names
        tpf_tmpfiles = KeplerTargetPixelFile.from_fits_images(tmpfile_names,
                            size=(3, 3),
                            position=SkyCoord(ra, dec, unit=(u.deg, u.deg)))

        # Should be able to run with a list of HDUlists
        tpf_hdus = KeplerTargetPixelFile.from_fits_images(hdus,
                            size=(3, 3),
                            position=SkyCoord(ra, dec, unit=(u.deg, u.deg)))

        # Clean up the temporary files we created
        for filename in tmpfile_names:
            try:
                os.remove(filename)
            except PermissionError:
                pass  # This appears to happen on Windows
Example #7
File: imcalc.py  Project: oczoske/imcalc
import sys

import numpy as np
from astropy.io import fits

# FUNC0, FUNC1, FUNC2 and ifelse are module-level operator tables/helpers
# defined elsewhere in imcalc.py.


def imcalc(command, filenames, bitpix=None):
    '''Perform image calculations given as a stack-based (RPN) command.

    Parameters
    ----------
    command : str
        Command to perform; operands and operators are whitespace-separated
        and evaluated on a stack. FITS file names are referenced by '%1',
        '%2', etc.
    filenames : list
        List of names of FITS files.
    bitpix : str or dtype
        bitpix (dtype) of the result.

    Returns
    -------
    A FITS HDU.
    '''
    # parse command line options
    tokenlist = command.split()

    print("Command: ", tokenlist, file=sys.stderr)
    print("Files: ", filenames, file=sys.stderr)

    images = dict()

    stack = list()

    # Get size of image
    naxes = []
    naxes.append(fits.getval(filenames[0], 'NAXIS2'))
    naxes.append(fits.getval(filenames[0], 'NAXIS1'))

    if 'x' in tokenlist:
        xarr = np.mgrid[0:naxes[0], 0:naxes[1]][1] + 1.

    if 'y' in tokenlist:
        yarr = np.mgrid[0:naxes[0], 0:naxes[1]][0] + 1.

    for token in tokenlist:
        if token[0] == '%':     # FITS image
            index = int(token[1:]) - 1

            if token not in images.keys():
                images[token] = fits.getdata(filenames[index])

            stack.append(images[token])
        elif token == 'x':     # X array
            stack.append(xarr)
        elif token == 'y':     # Y array
            stack.append(yarr)
        elif token in ['+', '-']:   # can be unary or binary
            right = stack.pop()
            try:
                left = stack.pop()
                result = FUNC2[token](left, right)
            except IndexError:
                result = FUNC1[token](right)
            stack.append(result)
        elif token in FUNC0.keys():  # functions not operating on an image
            result = FUNC0[token](naxes[1], naxes[0])
            stack.append(result)
        elif token in FUNC1.keys():  # unary operators
            right = stack.pop()
            result = FUNC1[token](right)
            stack.append(result)
        elif token in FUNC2.keys():   # binary operators
            right = stack.pop()
            left = stack.pop()
            result = FUNC2[token](left, right)
            print(result.dtype, file=sys.stderr)
            stack.append(result)
        elif token == '?':
            logic = stack.pop()
            false = stack.pop()
            true = stack.pop()
            result = ifelse(logic, true, false)
            stack.append(result)
        else:
            try:    # test if numerical value
                token = float(token)
                stack.append(token)
            except ValueError:   # unknown
                sys.exit("Undefined operation " + token)


    if len(stack) != 1:
        sys.exit("Stack has improper length: " + str(stack))
    result = stack.pop()

    header = fits.getheader(filenames[0])
    header.add_history("imcalc '" + command + "'")

    if bitpix is not None:
        print("bitpix: ", bitpix)
        result = result.astype(bitpix)

    return fits.PrimaryHDU(result, header)
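
A hypothetical invocation (the operator tables FUNC0/FUNC1/FUNC2 are defined elsewhere in the module; '+' as a binary operator is assumed here):

hdu = imcalc('%1 %2 +', ['image_a.fits', 'image_b.fits'], bitpix='float32')
hdu.writeto('sum.fits', overwrite=True)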
Example #8
import numpy as np
from astropy.io import fits

from aplpy import FITSFigure

# ARRAY is a module-level test image defined in the aplpy test suite.


def test_convolve_int():
    # Regression test for aplpy/aplpy#165
    hdu = fits.PrimaryHDU(ARRAY)
    f = FITSFigure(hdu)
    f.show_grayscale(smooth=3)
    f.close()
Example #9
def test_convolve_default():
    hdu = fits.PrimaryHDU(ARRAY)
    f = FITSFigure(hdu)
    f.show_grayscale(smooth=3)
    f.close()
Example #10
def test_convolve_box():
    hdu = fits.PrimaryHDU(ARRAY)
    f = FITSFigure(hdu)
    f.show_grayscale(kernel='box', smooth=3)
    f.close()
Example #11
def test_convolve_custom():
    hdu = fits.PrimaryHDU(ARRAY)
    f = FITSFigure(hdu)
    f.show_grayscale(kernel=np.ones((3, 3)))
    f.close()
Example #12
import os

import numpy as np
import astroscrappy
from astropy.io import fits

# fi is a project-local helper module assumed to be importable.


def cosmicrays(infile, sigclip=5.0, sigfrac=0.2, objlim=2.0, niter=4,
               overwrite=False):
    print('\n#############################')
    print('Cosmicray removing.')

    inhdl = fits.open(infile)
    inhdr = inhdl[0].header
    scidata = inhdl[0].data
    basename = inhdr['FRAMEID']
    crname = basename + '.cr.fits'
    maskname = basename + '.mask.fits'
    if not fi.check_version(inhdl):
        inhdl.close()
        return crname, maskname, False
    inhdl.close()

    if os.path.isfile(crname):
        if not overwrite:
            print('\t Cosmicray-removed frame already exists. ' + crname)
            print('\t This procedure is skipped.')
            return crname, maskname, True

    print('Cosmicray removing for ' + str(inhdr['FRAMEID']))

    nx = scidata.shape[1]
    ny = scidata.shape[0]

    # Cosmicray removing
    print('\t L.A.Cosmic')
    #fitdata = scidata - residualdata
    crmask, cleandata = \
        astroscrappy.detect_cosmics(scidata, sigclip=sigclip, sigfrac=sigfrac, \
                objlim=objlim, gain=1.0, readnoise=4.0, satlevel=np.inf, \
                pssl=0.0, niter=niter, sepmed=False, cleantype='medmask', \
                fsmode='median', verbose=True)

    # Writing output files
    cleanhdu = fits.PrimaryHDU(data=cleandata)
    #cleanhdu = fits.PrimaryHDU(data=cleandata)
    cleanhdl = fits.HDUList([cleanhdu])
    cleanhdl[0].header = inhdr
    cleanhdl[0].header['LACO_VER'] = (astroscrappy.__version__, \
                                      'Python script version of LACOSMIC')

    crmaskint = crmask.astype(np.uint8)
    maskhdu = fits.PrimaryHDU(data=crmaskint)
    #maskhdu = fits.PrimaryHDU(data=fitdata)
    maskhdl = fits.HDUList([maskhdu])
    maskhdl[0].header = inhdr
    maskhdl[0].header['LACO_VER'] = (astroscrappy.__version__, \
                                      'Python script version of LACOSMIC')

    cleanhdl.writeto(crname, overwrite=overwrite)
    maskhdl.writeto(maskname, overwrite=overwrite)

    cleanhdl.close()
    maskhdl.close()

    print('\t Cleaned image ' + crname)
    print('\t Mask image: ' + maskname)
    return crname, maskname, True
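
A hypothetical run (the input file name is illustrative; the FRAMEID keyword in its header determines the output names):

crname, maskname, ok = cosmicrays('object_frame.fits', sigclip=4.5,
                                  overwrite=True)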
Example #13
import os

import numpy as np
from astropy.io import fits


def create_catalog(GCs, output_directory='./', galaxy='FCC47'):
    '''
    Creates the MUSE catalog as a fits file in the output directory
    '''
    # the spectra
    wave = GCs[0].wave

    filename = '{0}_GC_catalog.fits'.format(galaxy)
    print("- Writing: " + output_directory + filename)
    cols = []
    cols.append(
        fits.Column(name='GC_ID',
                    format='D',
                    array=[GCs[i].ID for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='X_PIX',
                    format='D',
                    array=[GCs[i].x for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='Y_PIX',
                    format='D',
                    array=[GCs[i].y for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='SNR',
                    format='D',
                    array=[GCs[i].SNR for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='RA',
                    format='D',
                    array=[GCs[i].ra for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='DEC',
                    format='D',
                    array=[GCs[i].dec for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='g',
                    format='D',
                    array=[GCs[i].g for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='z',
                    format='D',
                    array=[GCs[i].z for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='v',
                    format='D',
                    array=[GCs[i].v for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='dv',
                    format='D',
                    array=[GCs[i].dv for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='m',
                    format='D',
                    array=[GCs[i].m for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='dm',
                    format='D',
                    array=[GCs[i].dm for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='r',
                    format='D',
                    array=[GCs[i].r for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='age',
                    format='D',
                    array=[GCs[i].age for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='g_MUSE',
                    format='D',
                    array=[GCs[i].g_MUSE for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='z_MUSE',
                    format='D',
                    array=[GCs[i].z_MUSE for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='m_miles',
                    format='D',
                    array=[GCs[i].m_miles for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='dm_miles',
                    format='D',
                    array=[GCs[i].dm_miles for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='m_amiles',
                    format='D',
                    array=[GCs[i].m_amiles for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='dm_amiles',
                    format='D',
                    array=[GCs[i].dm_amiles for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='m_CaT',
                    format='D',
                    array=[GCs[i].m_CaT for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='dm_CaT',
                    format='D',
                    array=[GCs[i].dm_CaT for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='m_b',
                    format='D',
                    array=[GCs[i].m_b for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='dm_b',
                    format='D',
                    array=[GCs[i].dm_b for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='m_ssp',
                    format='D',
                    array=[GCs[i].m_ssp for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='dm_ssp',
                    format='D',
                    array=[GCs[i].dm_ssp for i in range(len(GCs))]))
    cols.append(
        fits.Column(name='galaxy',
                    format='A6',
                    array=[GCs[i].galaxy for i in range(len(GCs))]))
    tbhdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))

    dwave = wave[1] - wave[0]
    wave0 = wave[0]
    wave1 = wave[-1]
    hdr = fits.Header()
    hdr['wave0'] = np.round(wave0, 4)
    hdr['wave1'] = np.round(wave1 + dwave, 4)
    hdr['dwave'] = dwave
    hdr['galaxy'] = galaxy

    hdu = fits.PrimaryHDU(header=hdr)
    hdu1 = fits.ImageHDU(
        [GCs[i].spec + GCs[i].bg_spec for i in range(len(GCs))])
    hdu2 = fits.ImageHDU([GCs[i].bg_spec for i in range(len(GCs))])
    hdulist = fits.HDUList([hdu, tbhdu, hdu1, hdu2])
    if os.path.isfile(output_directory + filename):
        os.remove(output_directory + filename)
    hdulist.writeto(output_directory + filename)
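
The long run of cols.append(...) calls above can equivalently be generated in a loop; a sketch of the pattern (attribute names taken from the code above; the list is to be extended with the remaining numeric columns):

col_attrs = [('GC_ID', 'ID'), ('X_PIX', 'x'), ('Y_PIX', 'y'), ('SNR', 'SNR'),
             ('RA', 'ra'), ('DEC', 'dec'), ('g', 'g'), ('z', 'z'),
             ('v', 'v'), ('dv', 'dv'), ('m', 'm'), ('dm', 'dm')]
cols = [fits.Column(name=name, format='D',
                    array=[getattr(gc, attr) for gc in GCs])
        for name, attr in col_attrs]
# the string-valued 'galaxy' column keeps its 'A6' format and is appended
# separately, as in the original code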
Example #14
    def savedata(self,
                 filepath,
                 data,
                 klipparams=None,
                 filetype=None,
                 zaxis=None,
                 center=None,
                 astr_hdr=None,
                 fakePlparams=None,
                 more_keywords=None):
        """
        Save data in a GPI-like fashion. Aka, data and header are in the first extension header

        Inputs:
            filepath: path to file to output
            data: 2D or 3D data to save
            klipparams: a string of klip parameters
            filetype: filetype of the object (e.g. "KL Mode Cube", "PSF Subtracted Spectral Cube")
            zaxis: a list of values for the zaxis of the datacub (for KL mode cubes currently)
            astr_hdr: wcs astrometry header (None for NIRC2)
            center: center of the image to be saved in the header as the keywords PSFCENTX and PSFCENTY in pixels.
                The first pixel has coordinates (0,0)
            fakePlparams: fake planet params
            more_keywords (dictionary) : a dictionary {key: value, key:value} of header keywords and values which will
                            written into the primary header

        """
        hdulist = fits.HDUList()
        hdulist.append(fits.PrimaryHDU(header=self.prihdrs[0]))
        hdulist.append(fits.ImageHDU(data=data, name="Sci"))

        # save all the files we used in the reduction
        # we'll assume you used all the input files
        # remove duplicates from list
        filenames = np.unique(self.filenames)
        nfiles = np.size(filenames)
        hdulist[0].header["DRPNFILE"] = nfiles
        for i, thispath in enumerate(filenames):
            thispath = thispath.replace("\\", '/')
            splited = thispath.split("/")
            fname = splited[-1]
            #            matches = re.search('S20[0-9]{6}[SE][0-9]{4}', fname)
            filename = fname  #matches.group(0)
            hdulist[0].header["FILE_{0}".format(i)] = filename

        # write out psf subtraction parameters
        # get pyKLIP revision number
        pykliproot = os.path.dirname(
            os.path.dirname(os.path.realpath(__file__)))
        # the universal_newlines argument is just so python3 returns a string instead of bytes
        # this will probably come to bite me later
        try:
            pyklipver = subprocess.check_output(
                ['git', 'rev-parse', '--short', 'HEAD'],
                cwd=pykliproot,
                universal_newlines=True).strip()
        except Exception:
            pyklipver = "unknown"
        hdulist[0].header['PSFSUB'] = "pyKLIP"
        hdulist[0].header.add_history(
            "Reduced with pyKLIP using commit {0}".format(pyklipver))
        #if self.creator is None:
        #    hdulist[0].header['CREATOR'] = "pyKLIP-{0}".format(pyklipver)
        #else:
        #    hdulist[0].header['CREATOR'] = self.creator
        #    hdulist[0].header.add_history("Reduced by {0}".self.creator)

        # store commit number for pyklip
        hdulist[0].header['pyklipv'] = pyklipver

        if klipparams is not None:
            hdulist[0].header['PSFPARAM'] = klipparams
            hdulist[0].header.add_history(
                "pyKLIP reduction with parameters {0}".format(klipparams))

        if fakePlparams is not None:
            hdulist[0].header['FAKPLPAR'] = fakePlparams
            hdulist[0].header.add_history(
                "pyKLIP reduction with fake planet injection parameters {0}".
                format(fakePlparams))

        if filetype is not None:
            hdulist[0].header['FILETYPE'] = filetype

        if zaxis is not None:
            #Writing a KL mode Cube
            if "KL Mode" in filetype:
                hdulist[0].header['CTYPE3'] = 'KLMODES'
                #write them individually
                for i, klmode in enumerate(zaxis):
                    hdulist[0].header['KLMODE{0}'.format(i)] = klmode

        #use the dataset astr hdr if none was passed in
        #if astr_hdr is None:
        #    print self.wcs[0]
        #    astr_hdr = self.wcs[0]
        if astr_hdr is not None:
            #update astro header
            #I don't have a better way of doing this, so we'll just inject all the values by hand
            astroheader = astr_hdr.to_header()
            exthdr = hdulist[0].header
            exthdr['PC1_1'] = astroheader['PC1_1']
            exthdr['PC2_2'] = astroheader['PC2_2']
            try:
                exthdr['PC1_2'] = astroheader['PC1_2']
                exthdr['PC2_1'] = astroheader['PC2_1']
            except KeyError:
                exthdr['PC1_2'] = 0.0
                exthdr['PC2_1'] = 0.0
            #remove CD values as those are confusing
            exthdr.remove('CD1_1')
            exthdr.remove('CD1_2')
            exthdr.remove('CD2_1')
            exthdr.remove('CD2_2')
            exthdr['CDELT1'] = 1
            exthdr['CDELT2'] = 1

        #use the dataset center if none was passed in
        if center is None:
            center = self.centers[0]
        if center is not None:
            hdulist[0].header.update({
                'PSFCENTX': center[0],
                'PSFCENTY': center[1]
            })
            hdulist[0].header.update({
                'CRPIX1': center[0],
                'CRPIX2': center[1]
            })
            hdulist[0].header.add_history("Image recentered to {0}".format(
                str(center)))

        # store extra keywords in header
        if more_keywords is not None:
            for hdr_key in more_keywords:
                hdulist[0].header[hdr_key] = more_keywords[hdr_key]

        hdulist.writeto(filepath, overwrite=True)
        hdulist.close()
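
A hypothetical call (assuming dataset is the instrument object providing this method and cube is the reduced data; the parameter strings are illustrative):

dataset.savedata('klip_cube.fits', cube,
                 klipparams='annuli=9, subsections=4, numbasis=[1, 5, 20]',
                 filetype='KL Mode Cube', zaxis=[1, 5, 20])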
Example #15
import numpy as np
from astropy.io import fits

# num, chandra_type, x, y, r and chandra_flux are defined earlier in the script.
data2 = np.recarray((num, ),
                    dtype=(np.record, [('TYPE', 'S16'), ('X', '>f4', (4, )),
                                       ('Y', '>f4', (4, )),
                                       ('R', '>f4', (4, )),
                                       ('FLUX', '>f4', (1, )),
                                       ('ROTANG', '>f4', (4, ))]))
for i in range(0, num):
    #if chandra_type[i] == 'GALAXY':
    data2[i][0] = chandra_type[i]
    data2[i][1] = (x[i], 0, 0, 0)
    data2[i][2] = (y[i], 0, 0, 0)
    data2[i][3] = (r[i], 0, 0, 0)
    data2[i][4] = chandra_flux[i]
    data2[i][5] = (0., 0, 0, 0)

file = "pnS005-bkg_region-sky-original.fits"
hdu = fits.open(file)
data = hdu[1].data

nhdu1 = fits.PrimaryHDU()
nhdu1.header = hdu[0].header

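# from_columns with nrows preallocates a table with the same column layout as
# the input region table and num rows; its data are then replaced wholesale
# below.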
nhdu2 = fits.BinTableHDU.from_columns(data.columns, nrows=num)

nhdu2.data = data2
nhdu2.header = hdu[1].header
nhdu2.header['NAXIS2'] = 739
hdulist = fits.HDUList([nhdu1, nhdu2])
hdulist.writeto("pnS005-bkg_region-sky-type.fits")
Example #16
def combine_cubes(listcubes, listmasks, regions=True):
    """
    Combine cubes in mean mode o with median.
    Apply masks as desired.

    cubes    -> a list of cubes to use in the combine
    masks    -> a list of goodpix masks from the pipeline
 
    regions  -> if True, code searches for ds9 region files inside path with same 
                name as pipeline mask (.reg), to mask additional area that one wants 
                to clip

           
    """

    from astropy.io import fits
    import numpy as np
    import scipy
    import os
    import matplotlib.pyplot as plt
    from mypython.fits import pyregmask as msk

    if (os.path.isfile("COMBINED_CUBE_MED.fits")
            & os.path.isfile("COMBINED_CUBE.fits")):
        print("Coadded cubes already exist!")
        return

    #continue with implicit else if passed the checks

    if (regions):
        print("Updating the masks following ds9 regions")

        #loads list
        clistmask = np.loadtxt(listmasks, dtype=np.dtype('a'))

        #redefine new mask
        mask_new = "new_" + listmasks
        llms = open(mask_new, "w")

        #loop over and update with regions
        #if scalar, make it 1 element list
        if (clistmask.shape == ()):
            clistmask = [clistmask]

        for i, cmask in enumerate(clistmask):

            #create region name
            regname_line = (cmask.split(".fits")[0]) + ".reg"
            #reconstruct cubex region name
            rnpath = (cmask.split("MASK")[0])
            rnexp = (cmask.split("_")[1])
            regname_cubex = rnpath + "DATACUBE_FINAL_LINEWCS_" + rnexp + "_fix2_SliceEdgeMask.reg"

            #search if file exist
            if (os.path.isfile(regname_line)):
                regname = regname_line
            elif (os.path.isfile(regname_cubex)):
                regname = regname_cubex
            else:
                regname = None

            if (regname):
                #update the mask
                print("Updating mask using {}".format(regname))

                #open fits
                cfits = fits.open(cmask)

                #init reg mask
                Mask = msk.PyMask(cfits[1].header["NAXIS1"],
                                  cfits[1].header["NAXIS2"], regname)
                for ii in range(Mask.nreg):
                    Mask.fillmask(ii)
                    if (ii == 0):
                        totmask = Mask.mask
                    else:
                        totmask += Mask.mask

                #update the mask
                cfits[1].data = cfits[1].data * 1 * np.logical_not(totmask)
                savename = cmask.split(".fits")[0] + '_wreg.fits'
                cfits.writeto(savename, overwrite=True)
                llms.write(savename + '\n')

            else:
                #keep current mask
                llms.write(cmask + '\n')

        #done with new masks
        llms.close()

    else:
        print('Using original masks...')
        mask_new = listmasks

    print("Combining cubes with mean and median")

    #load the relevant lists
    cblis = open(listcubes)
    mklis = open(mask_new)

    allcubes = []
    allmasks = []

    for cc in cblis:
        allcubes.append(fits.open(cc.strip()))

    for mm in mklis:
        allmasks.append(fits.open(mm.strip()))

    cblis.close()
    mklis.close()

    #generate list of cubes
    nexp = len(allcubes)
    print('Coadding {} exposures...'.format(nexp))

    #make space for final grid
    finalcube_mean = np.copy((allcubes[1])[1].data)
    finalvar = np.copy((allcubes[1])[2].data)
    finalcube_median = np.copy((allcubes[1])[1].data)

    #grab info on pixels
    nx = (allcubes[1])[1].header["NAXIS1"]
    ny = (allcubes[1])[1].header["NAXIS2"]
    nw = (allcubes[1])[1].header["NAXIS3"]

    #giant for loop over wave,pix
    print('Working on {} slices...'.format(nw))
    piximage = np.zeros((nexp, ny, nx))
    varimage = np.zeros((nexp, ny, nx))
    mskimage = np.zeros((nexp, ny, nx))
    masknans = np.zeros((ny, nx))

    for ww in range(nw):
        #print (' {} '.format(ww+1),end='')
        #now loop over exposure
        for ee in range(nexp):
            piximage[ee, :] = (allcubes[ee])[1].data[ww, :]
            varimage[ee, :] = (allcubes[ee])[2].data[ww, :]
            #clean nan
            masknans = masknans * 0
            notnans = np.where(np.isfinite(piximage[ee, :]))
            masknans[notnans] = 1
            #1 good pixels at first, then 1 bad pixels
            mskimage[ee, :] = np.logical_not(
                ((allmasks[ee])[1].data) * masknans)

        #construct masked arrays
        pixmasked = np.ma.array(piximage, mask=mskimage)
        varmasked = np.ma.array(varimage, mask=mskimage)

        #make coadds with masking
        finalcube_median[ww, :] = np.ma.median(pixmasked, axis=0)
        finalcube_mean[ww, :] = np.ma.mean(pixmasked, axis=0)
        countmap = np.ma.count(varmasked, axis=0)
        finalvar[ww, :] = np.ma.sum(varmasked, axis=0) / countmap / countmap
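        # variance of the mean over N unmasked exposures: Var = sum(var_i) / N**2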

    #write
    hdu1 = fits.PrimaryHDU([])
    hdu2 = fits.ImageHDU(finalcube_mean)
    hdu3 = fits.ImageHDU(finalvar)
    hdu2.header = (allcubes[0])[1].header
    hdu3.header = (allcubes[0])[2].header
    hdulist = fits.HDUList([hdu1, hdu2, hdu3])
    hdulist.writeto("COMBINED_CUBE.fits", overwrite=True)

    #write
    hdu1 = fits.PrimaryHDU([])
    hdu2 = fits.ImageHDU(finalcube_median)
    hdu3 = fits.ImageHDU(finalvar)
    hdu2.header = (allcubes[0])[1].header
    hdu3.header = (allcubes[0])[2].header
    hdulist = fits.HDUList([hdu1, hdu2, hdu3])
    hdulist.writeto("COMBINED_CUBE_MED.fits", overwrite=True)

    #make white images
    print('Creating final white images')
    white_mean = np.zeros((ny, nx))
    white_med = np.zeros((ny, nx))

    for xx in range(nx):
        for yy in range(ny):
            white_mean[yy, xx] = np.sum(finalcube_mean[:, yy, xx]) / nw
            white_med[yy, xx] = np.sum(finalcube_median[:, yy, xx]) / nw

    #save projected image
    hdu1 = fits.PrimaryHDU([])
    hdu2 = fits.ImageHDU(white_mean)
    hdu2.header = (allcubes[0])[1].header
    hdulist = fits.HDUList([hdu1, hdu2])
    hdulist.writeto("COMBINED_IMAGE.fits", overwrite=True)

    #save projected image
    hdu1 = fits.PrimaryHDU([])
    hdu2 = fits.ImageHDU(white_med)
    hdu2.header = (allcubes[0])[1].header
    hdulist = fits.HDUList([hdu1, hdu2])
    hdulist.writeto("COMBINED_IMAGE_MED.fits", overwrite=True)
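
A hypothetical invocation (each list file is assumed to contain one FITS path per line):

combine_cubes('cube_list.txt', 'mask_list.txt', regions=False)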
Example #17
def writexipbias(samples,
                 rhonames,
                 plots=False,
                 xim=False,
                 nameterms='terms_dxi.png',
                 dxiname='dxi.png',
                 namecovmat='covm_pars.png',
                 filename='dxi.fits'):
    from readjson import read_rhos
    from maxlikelihood import bestparameters
    from plot_stats import pretty_rho
    from readfits import read_corr
    from astropy.io import fits
    import numpy as np
    import matplotlib.pyplot as plt
    # corrmatrix is a project-local helper assumed to be in scope

    ##Format of the fits file output
    names = ['BIN1', 'BIN2', 'ANGBIN', 'VALUE', 'ANG']
    forms = ['i4', 'i4', 'i4', 'f8', 'f8']
    dtype = dict(names=names, formats=forms)
    nrows = 20
    outdata = np.recarray((nrows, ), dtype=dtype)
    namesout = ['TAU0P', 'TAU2P', 'TAU5P', 'TAU0M', 'TAU2M', 'TAU5M']

    #plot covariance matrix of parameters alpha, beta and eta.
    if plots:
        par_matcov = np.cov(samples)
        corr = corrmatrix(par_matcov)
        #print(par_matcov)
        #print(corr)
        cov_vmin = np.min(corr)
        plt.imshow(corr,
                   cmap='viridis' + '_r',
                   interpolation='nearest',
                   aspect='auto',
                   origin='lower',
                   vmin=cov_vmin,
                   vmax=1.)
        plt.colorbar()
        plt.title(r'$\alpha \mid \beta \mid \eta $')
        plt.savefig(namecovmat, dpi=500)
        print(namecovmat, 'Printed!')

    a = b = n = 0
    vara = varb = varn = 0
    covab = covan = covbn = 0
    bestpar = bestparameters(samples)
    par_matcov = np.cov(samples)
    if (par_matcov.size == 1): variances = par_matcov
    else: variances = np.diagonal(par_matcov)
    covariances = sum(
        (par_matcov[i, i + 1:].tolist() for i in range(len(samples) - 1)), [])
    if (len(samples) == 3):
        a, b, n = bestpar
        vara, varb, varn = variances
        covab, covan, covbn = covariances
    elif (len(samples) == 2):
        a, b = bestpar
        vara, varb = variances
        covab = covariances[0]
    elif (len(samples) == 1):
        a = bestpar[0]
        vara = variances
    else:
        print("Warning, test type not defined")

    meanr, rho0, cov_rho0 = read_corr(rhonames[0])
    meanr, rho1, cov_rho1 = read_corr(rhonames[1])
    meanr, rho2, cov_rho2 = read_corr(rhonames[2])
    meanr, rho3, cov_rho3 = read_corr(rhonames[3])
    meanr, rho4, cov_rho4 = read_corr(rhonames[4])
    meanr, rho5, cov_rho5 = read_corr(rhonames[5])
    sig_rho0 = np.sqrt(np.diag(cov_rho0))
    sig_rho1 = np.sqrt(np.diag(cov_rho1))
    sig_rho2 = np.sqrt(np.diag(cov_rho2))
    sig_rho3 = np.sqrt(np.diag(cov_rho3))
    sig_rho4 = np.sqrt(np.diag(cov_rho4))
    sig_rho5 = np.sqrt(np.diag(cov_rho5))

    #Plotting each term of the bias
    if (plots):
        xlim = [2., 300.]
        #supposing that a, b and n are independent of the rhos (scale independent)
        var0 = ((2 * a * rho0)**2) * vara + (a**2) * (sig_rho0**2)
        var1 = ((2 * b * rho1)**2) * varb + (b**2) * (sig_rho1**2)
        var2 = ((2 * n * rho3)**2) * varn + (n**2) * (sig_rho3**2)
        varab = vara * (b**2) + varb * (a**2) + 2 * covab * (a * b)
        #varab = ((a*b)**2)*( (vara/((a)**2)) + (varb/((b)**2)) + 2*covab/(a*b) )
        var3 = 4 * ((rho2**2) * varab + (sig_rho2**2) * ((a * b)**2))
        #var3 = 4*((a*b*rho2p)**2)*( varab/((a*b)**2) + (sig_rho2/rho2p)**2 )
        varbn = varn * (b**2) + varb * (n**2) + 2 * covbn * (b * n)
        #varbn = ((n*b)**2)*( (varn/((n)**2)) + (varb/((b)**2)) + 2*covbn/(b*n) )
        var4 = 4 * ((rho4**2) * varbn + (sig_rho4**2) * ((n * b)**2))
        #var4 = 4*((n*b*rho4p)**2)*(varbn/((b*n)**2) + (sig_rho4/rho4p)**2)
        varan = varn * (a**2) + vara * (n**2) + 2 * covan * (a * n)
        #varan = ((n*a)**2)*( (varn/((n)**2)) + (vara/((a)**2)) + 2*covan/(a*n) )
        var5 = 4 * ((rho5**2) * varan + (sig_rho5**2) * ((n * a)**2))
        #var5 = 4*((n*a*rho5p)**2)*(varan/((a*n)**2) + (sig_rho5/rho5p)**2)
        plt.clf()
        lfontsize = 7
        if (len(samples) == 3):
            pretty_rho(meanr, (a**2) * rho0,
                       np.sqrt(np.diag(cov_rho0)),
                       legend=r'$\alpha^{2} \rho_{0}$',
                       lfontsize=lfontsize,
                       color='red',
                       ylabel='Correlations',
                       xlim=xlim)
            pretty_rho(meanr, (b**2) * rho1,
                       np.sqrt(var1),
                       legend=r'$\beta^{2}\rho_{1}$',
                       lfontsize=lfontsize,
                       color='green',
                       ylabel='Correlations',
                       xlim=xlim)
            pretty_rho(meanr, (n**2) * rho3,
                       np.sqrt(var2),
                       legend=r'$\eta^{2}\rho_{3}$',
                       lfontsize=lfontsize,
                       color='black',
                       ylabel='Correlations',
                       xlim=xlim)
            pretty_rho(meanr, (2 * a * b) * rho2,
                       np.sqrt(var3),
                       legend=r'$2\alpha\beta \rho_{2}$',
                       lfontsize=lfontsize,
                       color='yellow',
                       ylabel='Correlations',
                       xlim=xlim)
            pretty_rho(meanr, (2 * b * n) * rho4,
                       np.sqrt(var4),
                       legend=r'$2\beta\eta\rho_{4}$',
                       lfontsize=lfontsize,
                       color='blue',
                       ylabel='Correlations',
                       xlim=xlim)
            pretty_rho(meanr, (2 * n * a) * rho5,
                       np.sqrt(var5),
                       legend=r'$2\eta\alpha\rho_{5}$',
                       lfontsize=lfontsize,
                       color='gray',
                       ylabel='Correlations',
                       xlim=xlim)
            print('Printing', nameterms)
            plt.savefig(nameterms, dpi=200)
        if (len(samples) == 2):
            pretty_rho(meanr, (a**2) * rho0,
                       np.sqrt(var0),
                       legend=r'$\alpha^{2} \rho_{0}$',
                       lfontsize=lfontsize,
                       color='red',
                       ylabel='Correlations',
                       xlim=xlim)
            pretty_rho(meanr, (b**2) * rho1,
                       np.sqrt(var1),
                       legend=r'$\beta^{2}\rho_{1}$',
                       lfontsize=lfontsize,
                       color='green',
                       ylabel='Correlations',
                       xlim=xlim)
            pretty_rho(meanr, (2 * a * b) * rho2,
                       np.sqrt(var3),
                       legend=r'$2\alpha\beta \rho_{2}$',
                       lfontsize=lfontsize,
                       color='yellow',
                       ylabel='Correlations',
                       xlim=xlim)
            print('Printing', nameterms)
            plt.savefig(nameterms, dpi=200)
        if (len(samples) == 1):
            pretty_rho(meanr, (a**2) * rho0,
                       np.sqrt(var0),
                       legend=r'$\alpha^{2} \rho_{0}$',
                       lfontsize=lfontsize,
                       color='red',
                       ylabel='Correlations',
                       xlim=xlim)
            print('Printing', nameterms)
            plt.savefig(nameterms, dpi=200)

    #supposing that a, b and n are independent of the rhos (scale independent)
    dxi = (a**2) * rho0 + (b**2) * rho1 + (n**2) * rho3 + (
        2 * a * b) * rho2 + (2 * b * n) * rho4 + (2 * n * a) * rho5
    f1 = 2 * (a * rho0 + b * rho2 + n * rho5)
    f2 = 2 * (b * rho1 + a * rho2 + n * rho4)
    f3 = 2 * (n * rho3 + b * rho4 + a * rho5)
    f4 = a**2
    f5 = b**2
    f6 = 2 * a * b
    f7 = n**2
    f8 = 2 * b * n
    f9 = 2 * n * a
    covmat_dxi = np.diag( (f1**2)*vara + (f2**2)*varb + (f3**2)*varn + 2*(f1*f2*covab + f1*f3*covan + f2*f3*covbn) ) \
    + (f4**2)*(cov_rho0) + (f5**2)*(cov_rho1) + (f6**2)*(cov_rho2) + (f7**2)*(cov_rho3) + (f8**2)*(cov_rho4) + (f9**2)*(cov_rho5)
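    # Note on the expression above (a sketch of the first-order propagation):
    # with dxi = f(a, b, n; rho_k), f1, f2, f3 are the partial derivatives of f
    # with respect to a, b and n, and f4..f9 are the fixed coefficients of each
    # rho_k term. The parameter piece, J Cov(a,b,n) J^T, is kept diagonal per
    # angular bin, while each rho term contributes fk^2 * Cov(rho_k) in full.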

    if (plots):
        plt.clf()
        pretty_rho(meanr,
                   dxi,
                   np.sqrt(np.diag(covmat_dxi)),
                   legend=r"$\delta \xi_{+}$",
                   ylabel=r"$\delta \xi_{+}$",
                   xlim=xlim)
        print('Printing', dxiname)
        plt.savefig(dxiname, dpi=150)

    nrows = len(dxi)
    hdu = fits.PrimaryHDU()
    hdul = fits.HDUList([hdu])
    covmathdu = fits.ImageHDU(covmat_dxi, name='COVMAT')
    hdul.insert(1, covmathdu)
    angarray = meanr
    valuearray = np.array(dxi)
    bin1array = np.array([-999] * nrows)
    bin2array = np.array([-999] * nrows)
    angbinarray = np.arange(nrows)
    array_list = [bin1array, bin2array, angbinarray, valuearray, angarray]
    for array, name in zip(array_list, names):
        outdata[name] = array
    corrhdu = fits.BinTableHDU(outdata, name='xi')
    hdul.insert(2, corrhdu)
    hdul.writeto(filename, overwrite=True)
    print(filename, 'Written!')
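A minimal read-back sketch for the file written above. Only the extension names ('COVMAT' and 'xi') come from the code; the table columns depend on the `names` list defined earlier in the script, and `filename` is whatever path was passed in:

from astropy.io import fits
import numpy as np

with fits.open(filename) as hdul_in:
    covmat = hdul_in['COVMAT'].data      # (nrows, nrows) covariance of dxi
    xi_tab = hdul_in['xi'].data          # binary table built from `names`
sigma = np.sqrt(np.diag(covmat))         # per-bin 1-sigma errors on dxi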
Example #18
def make_illcorr_ifu(ifumask_cname,
                     ifumask_iname,
                     data_cname,
                     data_iname,
                     outcorr,
                     outcorrnorm,
                     newcube,
                     newimage,
                     binwidth,
                     debug=False):
    """

    Perform illumination correction on IFUs in wavelength bins 

    ifumask_cname,ifumask_iname  --> IFU mask cube and image names
    data_cname,data_iname        --> data cube and image names
    outcorr,outcorrnorm          --> correction save names
    newcube,newimage             --> data cube and image names for wavelength-dependent IFU corrections
    binwidth                     --> size of the chunks in the z-direction used to compute the illumination correction
    debug                        --> enable interactive displays 

    """

    import os
    import glob
    import subprocess
    import shutil
    from astropy.io import fits
    import muse_utils as mut
    import numpy as np
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    import scipy.signal as sgn
    from scipy.stats import sigmaclip
    from scipy import interpolate
    import sep

    #open the ifu mask to create a good mask
    data = fits.open(data_cname)
    ifimask = fits.open(ifumask_iname)
    fovdata = fits.open(data_iname)

    #define geometry
    nwave = data[1].header["NAXIS3"]
    nx = data[1].header["NAXIS1"]
    ny = data[1].header["NAXIS2"]

    #now flag the sources
    ifumsk = ifimask[1].data
    image = fovdata[1].data.byteswap().newbyteorder()
    bkg = sep.Background(image)
    bkg.subfrom(image)
    obj, segmap = sep.extract(image,
                              3. * bkg.globalrms,
                              minarea=10,
                              segmentation_map=True)

    #manual reset segmap
    #reset=np.where(segmap==20)
    #segmap[reset]=0

    #make a coarse illumination correction in wavelength
    nbins = nwave // binwidth
    illcorse = np.zeros((nbins, 24))
    illnorm = np.zeros((nbins, 24))
    illsmoo = np.zeros((nbins, 24))
    cbins = np.array(range(nbins)) * binwidth + binwidth / 2.

    #skip if already processed
    if not os.path.isfile(outcorr):

        if (debug):
            plt.imshow(image, origin='lower')
            plt.title('Field')
            plt.show()
            plt.imshow(segmap, origin='lower')
            plt.title('Source mask')
            plt.show()
            plt.imshow(ifumsk, origin='lower')
            plt.title('IFU mask')
            plt.show()

        #pixels used
        usedpix = np.zeros((ny, nx))
        #loop over ifus
        for iff in range(24):
            print('Computing correction for IFU {}'.format(iff + 1))
            #reconstruct id of pixels in this IFU
            flagvalue = (iff + 1) * 100.
            #pick pixels in this group and without sources
            #these are x,y in 2D image
            goodpx = np.nonzero(((ifimask[1].data == flagvalue + 1)
                                 | (ifimask[1].data == flagvalue + 2)
                                 | (ifimask[1].data == flagvalue + 3)
                                 | (ifimask[1].data == flagvalue + 4))
                                & (segmap < 1))
            usedpix[goodpx] = 1

            #loop over bins
            for bb in range(nbins):
                #get the start end index
                wstart = bb * binwidth
                wend = (bb + 1) * binwidth
                #sum all in wave
                img = np.nansum(data[1].data[wstart:wend, :, :],
                                axis=0) / binwidth
                #take median across spatial pixels
                illcorse[bb, iff] = np.nanmedian(img[goodpx])

                #compute robust mean - nans already excluded [does not perform very well]
                #c,l,u=sigmaclip(img[goodpx],3.,3.)
                #illcorse[bb,iff]=c.mean()

        #save
        hdu = fits.PrimaryHDU(illcorse)
        hdulist = fits.HDUList([hdu])
        hdulist.writeto(outcorr, overwrite=True)

        if (debug):
            plt.imshow(usedpix, origin='lower')
            plt.title('Pixels used for IFU correction')
            plt.show()

    else:
        print('Loading pre-computed corrections')
        illcorse = (fits.open(outcorr))[0].data

    #skip if already exists
    if not os.path.isfile(newcube):

        #next go for ifus normalisation given median
        for iff in range(24):
            #normalise
            illnorm[:, iff] = illcorse[:, iff] / np.nanmedian(illcorse, axis=1)
            #remove small scales bumps - [does not work well for discontinuities]
            #illsmoo[:,iff]=sgn.savgol_filter(illnorm[:,iff],5,1)
            #best to linear interpolate
            illsmoo[:, iff] = illnorm[:, iff]

            if (debug):
                plt.scatter(cbins, illnorm[:, iff])
                plt.plot(cbins, illsmoo[:, iff])
                plt.title("Corrections for IFU {}".format(iff + 1))
                plt.show()

        #save corrections
        hdu1 = fits.PrimaryHDU(illnorm)
        hdu2 = fits.ImageHDU(illsmoo)
        hdulist = fits.HDUList([hdu1, hdu2])
        hdulist.writeto(outcorrnorm, overwrite=True)

        #store old cube to check final normalisation
        oldcube = np.copy(data[1].data)
        #now apply
        for iff in range(24):
            print('Correcting IFU {}'.format(iff + 1))
            if (iff < 23):
                #first, interpolation along ifus
                x_current_ifu = (iff + 1) * 100.
                x_next_ifu = (iff + 2) * 100.
                #grab relevant pixels
                goodpx = np.where((ifimask[1].data >= x_current_ifu)
                                  & (ifimask[1].data < x_next_ifu))
                fcurrent = interpolate.interp1d(cbins,
                                                illsmoo[:, iff],
                                                fill_value="extrapolate")
                fnext = interpolate.interp1d(cbins,
                                             illsmoo[:, iff + 1],
                                             fill_value="extrapolate")
                #loop over wave and apply correction
                for ww in range(nwave):
                    y_current = fcurrent(ww)
                    y_next = fnext(ww)
                    slope = ((y_next - y_current) /
                             (x_next_ifu - x_current_ifu))
                    correction = y_current + slope * (ifimask[1].data[goodpx] -
                                                      x_current_ifu)
                    #apply correction to data
                    img = data[1].data[ww, :, :]
                    img[goodpx] = img[goodpx] / correction
                    data[1].data[ww, :, :] = img
                    #preserve SN
                    var = data[2].data[ww, :, :]
                    var[goodpx] = var[goodpx] / correction / correction
                    data[2].data[ww, :, :] = var
            else:
                #deal with last - simple correction with no interpolation
                x_current_ifu = (iff + 1) * 100.
                goodpx = np.where((ifimask[1].data >= x_current_ifu))
                fcurrent = interpolate.interp1d(cbins,
                                                illsmoo[:, iff],
                                                fill_value="extrapolate")
                for ww in range(nwave):
                    #apply to data
                    img = data[1].data[ww, :, :]
                    img[goodpx] = img[goodpx] / fcurrent(ww)
                    data[1].data[ww, :, :] = img
                    #preserve SN
                    var = data[2].data[ww, :, :]
                    var[goodpx] = var[goodpx] / fcurrent(ww) / fcurrent(ww)
                    data[2].data[ww, :, :] = var

        #finally, check for normalisation
        print('Checking flux normalisation...')
        white_old = np.zeros((ny, nx))
        white_new = np.zeros((ny, nx))
        for xx in range(nx):
            for yy in range(ny):
                white_old[yy, xx] = np.nansum(oldcube[:, yy, xx]) / nwave
                white_new[yy, xx] = np.nansum(data[1].data[:, yy, xx]) / nwave

        #renormalise on sky only
        goodpx = np.where((segmap == 0) & (np.isfinite(ifimask[1].data)))
        #oldcoeff=np.nanmedian(white_old[goodpx])
        #newcoeff=np.nanmedian(white_new[goodpx])
        #print ('Renormalise by {}'.format(oldcoeff/newcoeff))
        #data[1].data=data[1].data*oldcoeff/newcoeff
        #data[2].data=data[2].data*(oldcoeff/newcoeff)*(oldcoeff/newcoeff)

        renormcoeff = np.nanmedian(white_old[goodpx] / white_new[goodpx])
        print('Renormalise by {}'.format(renormcoeff))
        data[1].data = data[1].data * renormcoeff
        data[2].data = data[2].data * renormcoeff * renormcoeff
        #save new cubes
        data.writeto(newcube, overwrite=True)

        #create white image
        print('Creating final white image')
        white_new = np.zeros((ny, nx))
        for xx in range(nx):
            for yy in range(ny):
                white_new[yy, xx] = np.nansum(data[1].data[:, yy, xx]) / nwave

        #save image
        hdu1 = fits.PrimaryHDU([])
        hdu2 = fits.ImageHDU(white_new)
        hdu2.header = data[1].header
        hdulist = fits.HDUList([hdu1, hdu2])
        hdulist.writeto(newimage, overwrite=True)

    else:
        print(
            "Exposure already corrected for IFU illumination... move to next")
Example #19
File: imcalc.py  Project: oczoske/imcalc
def imcreate(command, naxes, bitpix=None):
    '''Create an image of size naxes[0] x naxes[1]

Parameters
----------
    command [str]:
        Command to perform. FITS file names are referenced by '%1', '%2', etc.
    naxes [tuple or array]:
        y and x size of output image
    bitpix [str or dtype]:
        bitpix of the result

Returns
-------
   A FITS HDU.

'''

    # parse command line options
    tokenlist = command.split()

    print("Command: ", tokenlist, file=sys.stderr)

    if 'x' in tokenlist:
        xarr = np.mgrid[0:naxes[0], 0:naxes[1]][1] + 1.

    if 'y' in tokenlist:
        yarr = np.mgrid[0:naxes[0], 0:naxes[1]][0] + 1.


    stack = list()

    for token in tokenlist:
        if token == 'x':
            stack.append(xarr)
        elif token == 'y':
            stack.append(yarr)
        elif token in ['+', '-']:   # can be unary or binary
            right = stack.pop()
            try:
                left = stack.pop()
                result = FUNC2[token](left, right)
            except IndexError:
                result = FUNC1[token](right)
            stack.append(result)
        elif token in FUNC0.keys():  # functions not operating on an image
            result = FUNC0[token](naxes[1], naxes[0])
            stack.append(result)
        elif token in FUNC1.keys():  # unary operators
            right = stack.pop()
            result = FUNC1[token](right)
            stack.append(result)
        elif token in FUNC2.keys():   # binary operators
            right = stack.pop()
            left = stack.pop()
            result = FUNC2[token](left, right)
            stack.append(result)
        elif token == '?':
            logic = stack.pop()
            false = stack.pop()
            true = stack.pop()
            result = ifelse(logic, true, false)
            stack.append(result)
        else:
            try:    # test if numerical value
                token = float(token)
                stack.append(token)
            except ValueError:   # unknown
                sys.exit("Undefined operation " + token)

    if len(stack) != 1:
        print("Stack has improper length: ", stack, file=sys.stderr)
        sys.exit("Malformed imcalc command: " + command)

    result = stack.pop()

    header = fits.Header()
    header.add_history("imcalc '" + command + "'")

    if bitpix is not None:
        try:
            result = result.astype(bitpix)
        except TypeError:
            print("is np.int16: ", bitpix is np.int16, file=sys.stderr)
            print("Old type: ", result.dtype, file=sys.stderr)
            print("bitpix: ", bitpix, file=sys.stderr)

    return fits.PrimaryHDU(result, header)
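A usage sketch for imcreate in the module context of imcalc.py, assuming the FUNC0/FUNC1/FUNC2 operator tables defined elsewhere in that file include a binary '+'; the RPN command 'x y +' pushes the two coordinate images and adds them, giving a diagonal ramp:

hdu = imcreate('x y +', (128, 128), bitpix=np.float32)
hdu.writeto('ramp.fits', overwrite=True)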
Example #20
def make_illcorr_stack(ifumask_cname,
                       ifumask_iname,
                       data_cname,
                       data_iname,
                       outcorr,
                       newcube,
                       newimage,
                       masknative,
                       maskedges,
                       debug=False):
    """

    Perform illumination correction on stacks using the white image only

    ifumask_cname,ifumask_iname  --> IFU mask cube and image names
    data_cname,data_iname        --> data cube and image names
    outcorr                      --> correction save name
    newcube,newimage             --> data cube and image names for white image stack corrections
    masknative                   --> in output, mask of native pixels which have not been interpolated
    maskedges                    --> in output, mask of stack edges
    debug                        --> enable interactive displays 
   
    """

    import os
    import glob
    import subprocess
    import shutil
    from astropy.io import fits
    import muse_utils as mut
    import numpy as np
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    import scipy.signal as sgn
    from scipy.stats import sigmaclip
    from scipy import interpolate
    import sep

    if not os.path.isfile(newcube):

        #open the ifu mask to create a good mask
        data = fits.open(data_cname)
        ifimask = fits.open(ifumask_iname)
        fovdata = fits.open(data_iname)

        #define geometry
        nwave = data[1].header["NAXIS3"]
        nx = data[1].header["NAXIS1"]
        ny = data[1].header["NAXIS2"]

        #now flag the sources
        ifumsk = ifimask[1].data
        image = fovdata[1].data.byteswap().newbyteorder()
        bkg = sep.Background(image)
        bkg.subfrom(image)
        obj, segmap = sep.extract(image,
                                  5. * bkg.globalrms,
                                  minarea=10,
                                  segmentation_map=True)

        #remove illumination patterns that can be selected as sources
        #by allowing very extended regions
        for ii, pp in enumerate(obj):
            if (pp['npix'] > 900):
                #print ii, pp['npix']
                pix = np.where(segmap == ii + 1)
                segmap[pix] = 0

        if (debug):
            plt.imshow(image, origin='lower')
            plt.title('Field')
            plt.show()
            plt.imshow(segmap, origin='lower')
            plt.title('Source mask')
            plt.show()
            plt.imshow(ifumsk, origin='lower')
            plt.title('IFU mask')
            plt.show()

        #now compute individual corrections and also prepare the mask of native pixels
        #the step above removes any wave dependency
        #now apply a stack by stack correction computed on white image
        print('Computing correction for stacks on white image')
        masknoninterp = np.zeros((ny, nx))
        usedpix = np.zeros((ny, nx))
        #renormalise on sky only
        goodpx = np.where((segmap == 0) & (np.isfinite(ifimask[1].data)))
        medcoeff = np.nanmedian(fovdata[1].data[goodpx])

        #now compute individual on stacks corrections
        white_corrections = np.zeros((24, 4))
        for iff in range(24):
            for i in range(4):
                #reconstruct id of pixels in this IFU
                flagvalue = (iff + 1) * 100. + i + 1
                #pick pixels in this group and without sources
                #these are indexes in 2D image
                goodpx = np.where((ifimask[1].data == flagvalue)
                                  & (segmap == 0))
                nonintpx = np.where((ifimask[1].data == flagvalue))
                usedpix[goodpx] = 1
                masknoninterp[nonintpx] = 1
                white_corrections[iff, i] = medcoeff / np.nanmedian(
                    fovdata[1].data[goodpx])

        #some display
        if (debug):
            plt.imshow(usedpix, origin='lower')
            plt.title('Pixels used for stack white correction')
            plt.show()

            plt.imshow(masknoninterp, origin='lower')
            plt.title('Pixels not interpolated')
            plt.show()

        #save products
        hdu = fits.PrimaryHDU(white_corrections)
        hdulist = fits.HDUList([hdu])
        hdulist.writeto(outcorr, overwrite=True)

        #save image
        hdu1 = fits.PrimaryHDU([])
        hdu2 = fits.ImageHDU(masknoninterp)
        hdu2.header = data[1].header
        hdulist = fits.HDUList([hdu1, hdu2])
        hdulist.writeto(masknative, overwrite=True)

        #next apply correction
        maskpixedge = np.zeros((ny, nx))

        #grab muse rotator for this exposure
        rotation = data[0].header["HIERARCH ESO INS DROT POSANG"]

        for iff in range(24):
            #this/next ifu pixel
            thisifu = (iff + 1) * 100.
            nextifu = (iff + 2) * 100.
            for i in range(4):
                #reconstruct id of pixels in this/next stack
                thisstack = (iff + 1) * 100. + i + 1
                nextstack = (iff + 1) * 100. + i + 2
                #pixels in this exact stack
                instack = np.where(ifimask[1].data == thisstack)
                #pixels in this IFUs (also interpolated)
                inifu = np.where((ifimask[1].data >= thisifu)
                                 & (ifimask[1].data < nextifu))

                #first find left-right edges of the stacks - this is dependent on rotation
                if ((rotation == 0.) | (rotation == 180.) |
                    (rotation == 360.)):
                    #find edges with buffer
                    left = np.min(instack[1])
                    right = np.max(instack[1])
                    bottom = np.min(inifu[0])
                    top = np.max(inifu[0])
                    maskpixedge[bottom:top, left + 2:right - 2] = 1

                    #apply without interpolation
                    #apply to data
                    data[1].data[:, bottom:top, left:right] = data[
                        1].data[:, bottom:top,
                                left:right] * white_corrections[iff, i]
                    #preserve SN
                    data[2].data[:,bottom:top,left:right]=data[2].data[:,bottom:top,left:right]*\
                        white_corrections[iff,i]*white_corrections[iff,i]

                elif ((rotation == 90.) | (rotation == 270.)):
                    left = np.min(instack[0])
                    right = np.max(instack[0])
                    bottom = np.min(inifu[1])
                    top = np.max(inifu[1])
                    maskpixedge[left + 2:right - 2, bottom:top] = 1

                    #apply without interpolation
                    #apply to data
                    data[1].data[:, left:right, bottom:top] = data[
                        1].data[:, left:right,
                                bottom:top] * white_corrections[iff, i]
                    #preserve SN
                    data[2].data[:,left:right,bottom:top]=data[2].data[:,left:right,bottom:top]*\
                        white_corrections[iff,i]*white_corrections[iff,i]

                else:
                    print(
                        "Cannot handle rotation {}... quit!".format(rotation))
                    exit()

        if (debug):
            plt.imshow(maskpixedge, origin='lower')
            plt.title("Edge mask")
            plt.show()

        #save edge mask
        hdu1 = fits.PrimaryHDU([])
        hdu2 = fits.ImageHDU(maskpixedge)
        hdu2.header = data[1].header
        hdulist = fits.HDUList([hdu1, hdu2])
        hdulist.writeto(maskedges, overwrite=True)

        #save new cubes
        data.writeto(newcube, overwrite=True)

        #create white image
        print('Creating final white image')
        white_new = np.zeros((ny, nx))
        for xx in range(nx):
            for yy in range(ny):
                white_new[yy, xx] = np.nansum(data[1].data[:, yy, xx]) / nwave

        #save image
        hdu1 = fits.PrimaryHDU([])
        hdu2 = fits.ImageHDU(white_new)
        hdu2.header = data[1].header
        hdulist = fits.HDUList([hdu1, hdu2])
        hdulist.writeto(newimage, overwrite=True)

    else:
        print("Exposure already corrected... go to next")
# Read the image.
image = Image.open(dir_img_jpg)
# Convert to grayscale.
imagebn = image.convert('L')
# Get the image size.
xsize, ysize = imagebn.size
# Grab the count data of the image (0 - 255).
fits_aux1 = imagebn.getdata()
# Store those counts in an array.
fits_aux2 = np.array(fits_aux1, dtype=np.int32)
# Reshape that array to the dimensions of the image.
fits_aux3 = fits_aux2.reshape(ysize, xsize)
# Flip the array vertically to get the proper orientation.
fits_aux4 = np.flipud(fits_aux3)
# Create a basic .fits file.
fits_aux5 = fits.PrimaryHDU(data=fits_aux4)
# Save the file in FITS format.
fits_aux5.writeto(nombre_img_fits, overwrite=True)

## 5.- Run SExtractor.

# Change to the SExtractor directory.
os.chdir(dir_sext)
# Define the image path.
imdir = dir_img_fits + nombre_img_fits
# Build the SExtractor command.
sext = 'sextractor ' + imdir
# Run SExtractor. It generates "test.cat", which is read as a table.
subprocess.check_output(sext, shell=True)
sex_aux1 = ascii.read('./test.cat', format='sextractor')
# Sort by magnitude and select the 40 brightest stars.
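The snippet is cut off here. A minimal sketch of the selection the last comment describes, assuming the SExtractor catalog includes a MAG_BEST column (a hypothetical configuration choice):

sex_aux1.sort('MAG_BEST')  # ascending magnitude, i.e. brightest first
bright40 = sex_aux1[:40]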
Example #22
def internalskysub(listob, skymask, deepwhite=None):
    """

    Perform sky-subtraction using pixels within the cube

    listob  -> OBs to loop on
    skymask -> if defined as a ds9 region file (image coordinates),
               compute sky in these regions (excluding sources)
               Otherwise mask sources and use all the pixels in the field.

    """

    import os
    import glob
    from astropy.io import fits
    import numpy as np
    import zap
    import matplotlib.pyplot as plt
    import sep

    #grab top dir
    topdir = os.getcwd()
    #now loop over each folder and make the final illcorrected cubes
    for ob in listob:

        #change dir
        os.chdir(ob + '/Proc/')
        print('Processing {} for sky subtraction correction'.format(ob))

        #Search how many exposures are there
        scils = glob.glob("OBJECT_RED_0*.fits*")
        nsci = len(scils)

        #loop on exposures and reduce frame with zeroth order sky subtraction + ZAP
        for exp in range(nsci):

            #do pass on IFUs
            print('Internal sky subtraction of exposure {}'.format(exp + 1))

            #define names
            oldcube = "DATACUBE_FINAL_LINEWCS_EXP{0:d}_ILLCORR_stack.fits".format(
                exp + 1)
            oldimage = "IMAGE_FOV_LINEWCS_EXP{0:d}_ILLCORR_stack.fits".format(
                exp + 1)
            newcube = "DATACUBE_FINAL_LINEWCS_EXP{0:d}_lineskysub.fits".format(
                exp + 1)
            newimage = "IMAGE_FOV_LINEWCS_EXP{0:d}_lineskysub.fits".format(
                exp + 1)
            ifumask_iname = "IMAGE_IFUMASK_LINEWCS_EXP{0:d}.fits".format(exp +
                                                                         1)
            source_mask = "IMAGE_SOURCEMASK_LINEWCS_EXP{0:d}.fits".format(exp +
                                                                          1)
            zapcube = "DATACUBE_FINAL_LINEWCS_EXP{0:d}_zapsky.fits".format(
                exp + 1)
            zapimage = "IMAGE_FOV_LINEWCS_EXP{0:d}_zapsky.fits".format(exp + 1)
            zapsvdout = "ZAPSVDOUT_EXP{0:d}.fits".format(exp + 1)

            if not os.path.isfile(zapcube):

                #open the cube
                cube = fits.open(oldcube)
                #open mask ifu
                ifumask = fits.open(ifumask_iname)

                #if white image provided load it
                if (deepwhite):
                    print("Use source mask image {}".format(deepwhite))
                    whsrc = fits.open(topdir + '/' + deepwhite)
                    whitesource = whsrc[0].data.byteswap().newbyteorder()
                else:
                    #create from cube
                    print("Create source mask image from cube")
                    whitesource = np.nanmedian(cube[1].data, axis=0)

                #now create a source mask
                print('Create a source mask')
                header = cube[1].header
                bkg = sep.Background(whitesource)
                bkg_subtracted_data = whitesource - bkg.back()
                thresh = 3. * bkg.globalrms
                minarea = 20.
                clean = True
                segmap = np.zeros((header["NAXIS2"], header["NAXIS1"]))

                #extract objects
                objects, segmap = sep.extract(bkg_subtracted_data,
                                               thresh,
                                               segmentation_map=True,
                                               minarea=minarea,
                                               clean=clean)

                #plt.imshow(segmap,origin='low')
                #plt.show()

                #plt.imshow(whitesource,origin='low')
                #plt.show()

                #define geometry
                nwave = cube[1].header["NAXIS3"]
                nx = cube[1].header["NAXIS1"]
                ny = cube[1].header["NAXIS2"]

                #make sure pixels are sky sub once and only once
                countsub = np.copy(ifumask[1].data) * 0.

                #if mask is set do a coarse median sky subtraction
                if (skymask):
                    print('Constructing sky mask')
                    #for zap, sky region should be 0, and sources >1
                    skybox = np.zeros((ny, nx)) + 1
                    #construct the sky region mask
                    from mypython.fits import pyregmask as pmk
                    mysky = pmk.PyMask(nx,
                                       ny,
                                       "../../" + skymask,
                                       header=cube[1].header)
                    for ii in range(mysky.nreg):
                        mysky.fillmask(ii)
                        usepix = np.where(mysky.mask > 0)
                        skybox[usepix] = 0

                    #plt.imshow(skybox,origin='low')
                    #plt.show()
                    #plt.imshow(segmap,origin='low')
                    #plt.show()
                    #plt.imshow(ifumask[1].data,origin='low')
                    #plt.show()
                    #exit()

                    #now do median sky subtraction
                    #loop over wavelength
                    for ww in range(nwave):
                        #extract sky slice
                        skyimg = cube[1].data[ww, :, :]
                        #grab pixels with no source and in mask region
                        #avoid edges not flagged by IFU mask
                        pixels = np.where((skybox < 1) & (segmap < 1)
                                          & (ifumask[1].data > 0))
                        #compute sky in good regions
                        medsky = np.nanmedian(skyimg[pixels])
                        #subtract from all  pixels
                        cube[1].data[ww, :, :] = skyimg - medsky

                else:
                    #otherwise do coarse sky IFU by IFU
                    #loop over ifu
                    for iff in range(24):
                        thisifu = (iff + 1) * 100.
                        nextifu = (iff + 2) * 100. + 1
                        #grab pixels in ifu without sources
                        pixels=np.where((ifumask[1].data >= thisifu) & \
                                            (ifumask[1].data < nextifu)\
                                            & (segmap < 1) )
                        pixels_ifu=np.where((ifumask[1].data >= thisifu) \
                                                & (ifumask[1].data < nextifu)\
                                                & (countsub < 1))
                        #update used pixels
                        countsub[pixels_ifu] = 1

                        #loop over wavelength
                        for ww in range(nwave):
                            skyimg = cube[1].data[ww, :, :]
                            #compute sky in good regions
                            medsky = np.nanmedian(skyimg[pixels])
                            #subtract from all IFU pixels
                            skyimg[pixels_ifu] = skyimg[pixels_ifu] - medsky
                            cube[1].data[ww, :, :] = skyimg

                #write final cube
                cube.writeto(newcube, overwrite=True)

                #create white image
                print('Creating final white image')
                white_new = np.zeros((ny, nx))
                for xx in range(nx):
                    for yy in range(ny):
                        white_new[yy, xx] = np.nansum(cube[1].data[:, yy,
                                                                   xx]) / nwave

                #save projected image
                hdu1 = fits.PrimaryHDU([])
                hdu2 = fits.ImageHDU(white_new)
                hdu2.header = cube[1].header
                hdulist = fits.HDUList([hdu1, hdu2])
                hdulist.writeto(newimage, overwrite=True)

                #save segmap
                #make it redundant to be sure ZAP reads the right extension
                hdu1 = fits.PrimaryHDU(segmap)
                #hdu1.header=header
                hdu2 = fits.ImageHDU(segmap)
                #hdu2.header=header
                hdulist = fits.HDUList([hdu1, hdu2])
                hdulist.writeto(source_mask, overwrite=True)

                print('Running ZAP on exposure {}'.format(exp + 1))

                #deal with masks
                if (skymask):
                    #combine sky mask with source mask
                    #make it redundant to be sure ZAP reads the right extension
                    tmpzapmask = segmap + skybox
                    hdu1 = fits.PrimaryHDU(tmpzapmask)
                    #hdu1.header=header
                    hdu2 = fits.ImageHDU(tmpzapmask)
                    #hdu2.header=header
                    hdulist = fits.HDUList([hdu1, hdu2])
                    hdulist.writeto("ZAP_" + source_mask, clobber=True)
                    zapmask = "ZAP_" + source_mask
                else:
                    zapmask = source_mask

                #clean old if exists
                try:
                    os.remove(zapsvdout)
                except OSError:
                    pass
                #run new - handle change in keywords from v1 to v2
                try:
                    zap.process(newcube,
                                outcubefits=zapcube,
                                clean=True,
                                svdoutputfits=zapsvdout,
                                mask=zapmask)
                except TypeError:
                    #zap v2 dropped the svdoutputfits keyword
                    zap.process(newcube,
                                outcubefits=zapcube,
                                clean=True,
                                mask=zapmask)

                #create white image from zap cube
                cube = fits.open(zapcube)
                print('Creating final white image from ZAP')
                white_new = np.zeros((ny, nx))
                for xx in range(nx):
                    for yy in range(ny):
                        white_new[yy, xx] = np.nansum(cube[1].data[:, yy,
                                                                   xx]) / nwave

                #save projected image
                hdu1 = fits.PrimaryHDU([])
                hdu2 = fits.ImageHDU(white_new)
                hdu2.header = cube[1].header
                hdulist = fits.HDUList([hdu1, hdu2])
                hdulist.writeto(zapimage, overwrite=True)

            else:
                print("ZAP cube exist alread for exposure {}... skip!".format(
                    exp + 1))

        #back to top for next OB
        os.chdir(topdir)
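A hypothetical invocation of internalskysub; the OB folder names, region file and white image are placeholders:

# sky measured in ds9 regions (image coordinates), detected sources excluded
internalskysub(['OB1', 'OB2'], 'skyregions.reg', deepwhite='deep_white.fits')

# or: mask sources and subtract a coarse sky IFU by IFU over the whole field
internalskysub(['OB1'], None)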
Example #23
def writeSingleFITS(data, wcs, output, template, clobber=True, verbose=True):
    """ Write out a simple FITS file given a numpy array and the name of another
    FITS file to use as a template for the output image header.
    """
    outname, outextn = fileutil.parseFilename(output)
    outextname, outextver = fileutil.parseExtn(outextn)

    if fileutil.findFile(outname):
        if clobber:
            log.info('Deleting previous output product: %s' % outname)
            fileutil.removeFile(outname)

        else:
            log.warning('Output file %s already exists and overwrite not '
                        'specified!' % outname)
            log.error('Quitting... Please remove before resuming operations.')
            raise IOError

    # Now update WCS keywords with values from provided WCS
    if hasattr(wcs.sip, 'a_order'):
        siphdr = True
    else:
        siphdr = False
    wcshdr = wcs.wcs2header(sip2hdr=siphdr)

    if template is not None:
        # Get default headers from multi-extension FITS file
        # If input data is not in MEF FITS format, it will return 'None'
        # NOTE: These are HEADER objects, not HDUs
        (prihdr, scihdr, errhdr,
         dqhdr), newtab = getTemplates(template, EXTLIST)

        if scihdr is None:
            scihdr = fits.Header()
            indx = 0
            for c in prihdr.cards:
                if c.keyword not in ['INHERIT', 'EXPNAME']: indx += 1
                else: break
            for i in range(indx, len(prihdr)):
                scihdr.append(prihdr.cards[i])
            for i in range(indx, len(prihdr)):
                del prihdr[indx]
    else:
        scihdr = fits.Header()
        prihdr = fits.Header()
        # Start by updating PRIMARY header keywords...
        prihdr.set('EXTEND', value=True, after='NAXIS')
        prihdr['FILENAME'] = outname

    if outextname == '':
        outextname = 'sci'
    if outextver == 0: outextver = 1
    scihdr['EXTNAME'] = outextname.upper()
    scihdr['EXTVER'] = outextver

    for card in wcshdr.cards:
        scihdr[card.keyword] = (card.value, card.comment)

    # Create PyFITS HDUList for all extensions
    outhdu = fits.HDUList()
    # Setup primary header as an HDU ready for appending to output FITS file
    prihdu = fits.PrimaryHDU(header=prihdr)
    scihdu = fits.ImageHDU(header=scihdr, data=data)

    outhdu.append(prihdu)
    outhdu.append(scihdu)
    outhdu.writeto(outname)

    if verbose:
        print('Created output image: %s' % outname)
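A hypothetical call sketch for writeSingleFITS; it assumes an HST-style input and that wcs provides wcs2header, as stwcs.wcsutil.HSTWCS does (the file name is a placeholder):

from astropy.io import fits
from stwcs import wcsutil

data = fits.getdata('input_flt.fits', ext=1)
wcs = wcsutil.HSTWCS('input_flt.fits', ext=1)
writeSingleFITS(data, wcs, 'single_sci.fits[sci,1]', 'input_flt.fits')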
Example #24
def write_candidates(output_dir,
                     catId, tract, patch, objId, nVisit, pfsVisitHash,
                     lambda_ranges, mask, candidates, models, zpdf, linemeas, object_class):
    """Create a pfsZcandidates FITS file from an amazed output directory."""

    path = "pfsZcandidates-%03d-%05d-%s-%016x-%03d-0x%016x.fits" % (
        catId, tract, patch, objId, nVisit % 1000, pfsVisitHash)

    print("Saving {} redshifts to {}".format(len(candidates),
                                             os.path.join(output_dir, path)))
    header = [fits.Card('tract', tract, 'Area of the sky'),
              fits.Card('patch', patch, 'Region within tract'),
              fits.Card('catId', catId, 'Source of the objId'),
              fits.Card('objId', objId, 'Unique ID for object'),
              fits.Card('nvisit', nVisit, 'Number of visit'),
              fits.Card('vHash', pfsVisitHash, '63-bit SHA-1 list of visits')]

    hdr = fits.Header(header)
    primary = fits.PrimaryHDU(header=hdr)
    hdul = [primary]

    if object_class == 'GALAXY':
        npix = len(lambda_ranges)

        # data['PDU'] = np.array([])

        # create ZCANDIDATES HDU
        zcandidates = np.ndarray((len(candidates),),
                                    dtype=[('Z', 'f8'), ('Z_ERR', 'f8'),
                                        ('ZRANK', 'i4'),
                                        ('RELIABILITY', 'f8'),
                                        ('CLASS', 'S15'),
                                        ('SUBCLASS', 'S15'),
                                        ('MODELFLUX', 'f8', (npix,))])
        for i, candidate in enumerate(candidates):
            zcandidates[i]['Z'] = candidate.redshift
            zcandidates[i]['Z_ERR'] = candidate.deltaz
            zcandidates[i]['ZRANK'] = candidate.rank
            zcandidates[i]['RELIABILITY'] = candidate.intgProba
            zcandidates[i]['CLASS'] = object_class
            zcandidates[i]['SUBCLASS'] = ''
            model = np.array(lambda_ranges, dtype=np.float64, copy=True)
            model.fill(np.nan)
            np.place(model, mask == 0, models[i])
            zcandidates[i]['MODELFLUX'] = np.array(model)
        hdul.append(fits.BinTableHDU(name='ZCANDIDATES', data=zcandidates))

        # create LAMBDA_SCALE HDU
        lambda_scale = np.array(lambda_ranges, dtype=[('WAVELENGTH', 'f4')])
        hdul.append(fits.BinTableHDU(name='MODELWL', data=lambda_scale))

        # create ZPDF HDU
        zpdf_hdu = np.ndarray(len(zpdf), buffer=zpdf,
                            dtype=[('REDSHIFT', 'f8'), ('PDF', 'f8')])
        hdul.append(fits.BinTableHDU(name='ZPDF', data=zpdf_hdu))

        # create ZLINES HDU
        if linemeas is not None:
            zlines = np.ndarray((len(linemeas),),
                                dtype=[('LINENAME', 'S15'),
                                        ('LINEWAVE', 'f8'),
                                        ('LINEZ', 'f8'),
                                        ('LINEZ_ERR', 'f8'),
                                        ('LINESIGMA', 'f8'),
                                        ('LINESIGMA_ERR', 'f8'),
                                        ('LINEVEL', 'f8'),
                                        ('LINEVEL_ERR', 'f8'),
                                        ('LINEFLUX', 'f8'),
                                        ('LINEFLUX_ERR', 'f8'),
                                        ('LINEEW', 'f8'),
                                        ('LINEEW_ERR', 'f8'),
                                        ('LINECONTLEVEL', 'f8'),
                                        ('LINECONTLEVEL_ERR', 'f8')])
            for i, lm in enumerate(linemeas):
                zlines[i]['LINENAME'] = lm.name
                zlines[i]['LINEWAVE'] = lm.lambda_obs  # TODO: or lambda_rest_beforeOffset ?
                zlines[i]['LINEZ'] = np.nan  # TODO: what is that ?
                zlines[i]['LINEZ_ERR'] = np.nan  # TODO: what is that ?
                zlines[i]['LINESIGMA'] = lm.sigma
                zlines[i]['LINESIGMA_ERR'] = np.nan  # TODO: what is that ?
                zlines[i]['LINEVEL'] = lm.velocity
                zlines[i]['LINEVEL_ERR'] = np.nan  # TODO: what is that
                zlines[i]['LINEFLUX'] = lm.flux
                zlines[i]['LINEFLUX_ERR'] = lm.flux_err
                zlines[i]['LINEEW'] = np.nan  # TODO: what is that
                zlines[i]['LINEEW_ERR'] = np.nan  # TODO: what is that
                zlines[i]['LINECONTLEVEL'] = np.nan  # TODO: what is that
                zlines[i]['LINECONTLEVEL_ERR'] = np.nan  # TODO: what is that
            hdul.append(fits.BinTableHDU(name='ZLINES', data=zlines))

    elif object_class == 'STAR':

        # create ZCANDIDATES HDU
        zcandidates = np.ndarray((len(candidates),),
                                    dtype=[('Z', 'f8'), ('Z_ERR', 'f8'),
                                        ('ZRANK', 'i4'),
                                        ('RELIABILITY', 'f8'),
                                        ('CLASS', 'S15'),
                                        ('SUBCLASS', 'S15')])
        for i, candidate in enumerate(candidates):
            zcandidates[i]['Z'] = candidate.redshift
            zcandidates[i]['Z_ERR'] = 0.
            zcandidates[i]['ZRANK'] = 0
            zcandidates[i]['RELIABILITY'] = candidate.intgProba
            zcandidates[i]['CLASS'] = object_class
            zcandidates[i]['SUBCLASS'] = candidate.template
        hdul.append(fits.BinTableHDU(name='ZCANDIDATES', data=zcandidates))


    fits.HDUList(hdul).writeto(os.path.join(output_dir, path),
                               overwrite=True)
    
    return path
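A minimal sketch exercising the STAR branch above; the candidate object only needs the attributes the function actually reads, and all values are hypothetical:

from types import SimpleNamespace

star = SimpleNamespace(redshift=0.0, intgProba=0.99, template='G2V')
write_candidates('.', catId=1, tract=8279, patch='1,1', objId=12345,
                 nVisit=1, pfsVisitHash=0x1, lambda_ranges=[], mask=None,
                 candidates=[star], models=None, zpdf=None, linemeas=None,
                 object_class='STAR')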
Example #25
        print("Could not load file at %s" %PSF_SOURCE)
        sys.exit()

    # Get the FWHM value
    gauss=vip.var.fit_2dgaussian(psf, crop=True, cropsize=30, cent=(PSF_XY[0], PSF_XY[1]), full_output=False, debug=False)

    print(gauss[0:1])
    fwhm_x = gauss[0]
    fwhm_y = gauss[1]
    fwhm = np.mean([fwhm_x, fwhm_y])
    print(fwhm)

    # Compute the central mask size in pixels
    mask_center_pixels = inner_rad_rdi * fwhm

    # Compute optimal principal components
    
    svd_decomposer = vip.pca.SVDecomposer(ref_cube)
    pca_comps = int(svd_decomposer.cevr_to_ncomp(0.9))

    print("Number of PCA components: %d" %pca_comps)
    
    #pcs, recon, residuals_cube, residuals_cube_, frame = vip.pca.pca_fullfr.pca(cube=science_cube, angle_list=angle_list, svd_mode="lapack", scaling="spat-mean", mask_center_px=mask_center_pixels,fwhm=fwhm, full_output=True, verbose=True)
    
    output_cube = vip.pca.pca_fullfr.pca(cube=science_cube, angle_list=angle_list, cube_ref=ref_cube, ncomp=pca_comps, svd_mode="lapack", scaling="spat-mean", mask_center_px=mask_center_pixels, source_xy=(PSF_XY[0], PSF_XY[1]),fwhm=fwhm, full_output=False, verbose=True)

    # Save final cube to target_RDI.fits
    output_filename = target_name + "_RDI.fits"
    hdu_new = fits.PrimaryHDU(output_cube)
    hdu_new.writeto(output_filename, overwrite=True)
    print("Finished writing to %s." %output_filename)
        for jdx in range(kernel_size):
            if (idx - xyc)**2 + (jdx - xyc)**2 >= radius_square:
                output_kernel[idx, jdx] = 0.
    output_kernel_2 = np.flip(np.flip(output_kernel, 0), 1)
    model_image = convolve2d(ref_image, output_kernel_2, mode='same')

    difference_image = model_image - data_image + a_vector[-1]
    difference_image[bright_mask] = 0.
    difference_image[-kernel_size - 2:, :] = 0.
    difference_image[0:kernel_size + 2, :] = 0.
    difference_image[:, -kernel_size - 2:] = 0.
    difference_image[:, 0:kernel_size + 2] = 0.

    return difference_image, output_kernel, a_vector[-1]


if __name__ == '__main__':

    kernel_size = 23
    ref_imagename = 'ref_image.fits'
    data_imagename = 'data_image.fits'
    difference_image, output_kernel, bkg_val = difference_image_single_iteration(
        ref_imagename, data_imagename, kernel_size, max_adu=35000.)

    hl7 = fits.PrimaryHDU(difference_image)
    hl7.writeto('tst_dif18.fits', overwrite=True)

    hl5 = fits.PrimaryHDU(output_kernel)
    hl5.header['BKG'] = bkg_val
    hl5.writeto('kernel_naive.fits', overwrite=True)
Example #27
 def hdu_list(self):
     return [
         pyfits.PrimaryHDU(header=None),  #primary
         self.spectral_table(),  # this table
         self.energy_table(),
     ]
def make_simple_trace(bbfile='grism.fits',outname='grismtrace',ybox=None,xbox=None,noisemaxfact=0.05,alph=1.0,Q=1.0,rotate=False,resize=None):

    go=pyfits.open(bbfile)
    
    
    redshift=go['BROADBAND'].header['REDSHIFT']
    wfc3_pix_as=0.13
    g141_nm_per_pix=4.65
    
    min_lam=1.075
    max_lam=1.700
    
    hdu=go['CAMERA0-BROADBAND-NONSCATTER']
    cube=hdu.data #L_lambda units! 
    #cube=np.flipud(cube) ; print(cube.shape)
    
    fil=go['FILTERS']
    lamb=fil.data['lambda_eff']*1.0e6
    flux=fil.data['L_lambda_eff_nonscatter0']
    
    g141_i = (lamb >= min_lam) & (lamb <= max_lam)
    
    arcsec_per_kpc= gsu.illcos.arcsec_per_kpc_proper(redshift)
    kpc_per_arcsec=1.0/arcsec_per_kpc.value
    
    im_kpc=hdu.header['CD1_1']
    print('pix size kpc: ', im_kpc)
    
    wfc3_kpc_per_pix=wfc3_pix_as*kpc_per_arcsec
    total_width_pix=(1.0e3)*(max_lam-min_lam)/g141_nm_per_pix
    total_width_kpc=total_width_pix*wfc3_kpc_per_pix
    
    total_width_impix=int(total_width_kpc/im_kpc)
    
    delta_lam=(max_lam-min_lam)/total_width_impix  #microns/pix
    
    psf_arcsec=0.18
    psf_kpc=psf_arcsec*kpc_per_arcsec
    psf_impix=psf_kpc/im_kpc
    
    
    imw_cross=200
    imw_disp=total_width_impix+imw_cross
    Np=cube.shape[-1]
    mid = np.int64(Np/2)
    delt=np.int64(imw_cross/2)
    output_image=np.zeros_like( np.ndarray(shape=(imw_disp,imw_cross),dtype='float' ))
    #r = r[mid-delt:mid+delt,mid-delt:mid+delt]
    output_image.shape
    small_cube=cube[g141_i,mid-delt:mid+delt,mid-delt:mid+delt]
    
    for i,l in enumerate(lamb[g141_i]):
        di=int( (l-min_lam)/delta_lam )
        this_cube=small_cube[i,:,:]*l**2  #convert to Janskies-like
        if rotate is True:
            this_cube = np.rot90(this_cube)

        #if i==17:
        #    this_cube[30,30] = 1.0e3
        #print(i,l/(1.0+redshift),int(di),np.sum(this_cube),this_cube.shape,output_image.shape,output_image[di:di+imw_cross,:].shape)
        output_image[di:di+imw_cross,:]=output_image[di:di+imw_cross,:]+this_cube
        
        
    output_image=scipy.ndimage.gaussian_filter(output_image,sigma=[4,psf_impix/2.355])
    
    new_thing = np.transpose(np.flipud(output_image))
    if resize is not None:
        new_thing = congrid.congrid(new_thing, resize)
    
    nr = noisemaxfact*np.max(new_thing)*random.randn(new_thing.shape[0],new_thing.shape[1])
    
    #thing=make_color_image.make_interactive(new_thing+nr,new_thing+nr,new_thing+nr,alph=alph,Q=Q)
    #thing=1.0-np.fliplr(np.transpose(thing,axes=[1,0,2]))
    thing=np.fliplr(new_thing+nr)

    f=plt.figure(figsize=(25,6))
    f.subplots_adjust(wspace=0.0,hspace=0.0,top=0.99,right=0.99,left=0,bottom=0)
    axi=f.add_subplot(1,1,1)
    axi.imshow(thing, aspect='auto', origin='lower', interpolation='nearest', cmap='Greys_r')
    f.savefig(outname+'.png',dpi=500)
    plt.close(f)

    #[ybox[0]:ybox[1],xbox[0]:xbox[1]]
    #[50:125,120:820,:]

    new_hdu=pyfits.PrimaryHDU(thing)
    new_list=pyfits.HDUList([new_hdu])
    new_list.writeto(outname+'.fits', overwrite=True)


    return thing, new_thing
import ois
import numpy as np
import time
import os
import glob
from astropy.io import fits

start = time.time()
#
# this script gets all the files in a directory and appends them to a list,
# starting from image_list_prelim[0] as the 1st file
#
image_list_prelim = []
for frame in glob.glob("*.fit"):
    image_list_prelim.append(frame)

image_list = [fits.getdata(image) for image in image_list_prelim]

compressed_image = np.sum(image_list, axis=0)

# the reference frame is the first image; the target is the summed stack
ref_data = image_list[0]
targ_data = compressed_image
# read the FITS header of the reference frame
ref_hdr = fits.getheader(image_list_prelim[0])

#
# subtraction of the compressed image from the reference image
#
diff_data, optimal_image, kernel, background = ois.optimal_system(
    targ_data, ref_data, kernelshape=(11, 11), method="Bramich")
hdu_diff = fits.PrimaryHDU(diff_data, header=ref_hdr)
hdu_diff.writeto("final_sub.fit", overwrite=True)
Example #30
def breaknint(fitsFile=defaultBreaknint):
    """
    
    NAME:
    ---------
    
    
    PURPOSE:
    ---------
          Separate a 'nominal' NIRCam exposure of NINTS packed into a 
          cube into separate FITS files for each integration. 
    
    CATEGORY:
    ---------
          Data analysis, NIRCam 
    
    
    Parameters
    ------------
    fitsFile: str
        Fits file

    
    DESCRIPTION:
    ------------
          A nominal NIRCam exposure will consist of NINT individual
          ramps. For some reason, someone thought it was a good idea to
          make the exposure into a DATA CUBE of size (NX,NY,NZ) where NZ
          is NINT*NGROUP.  A CUBE.  Not extensions, not single files, A
          CUBE. So this code breaks up the exposure into individual FITS
          files, one for each integration. 
    
    MODIFICATION HISTORY:
          Spring 2012 - Created; putridmeat ([email protected])
          Summer 2019 - converting to Python ([email protected])
    """
    HDUList = fits.open(fitsFile)

    if symLinkParam['dmsConvert'] == True:
        ## combine the header info together
        head_prim = HDUList[0].header
        head_sci = HDUList['SCI'].header

        head = merge_headers(head_prim, head_sci)
        head = dms_to_fitswriter_head(head)

        dat = HDUList['SCI'].data
        nr = dat.shape[0] * dat.shape[1]

        times_tab = Table(HDUList['INT_TIMES'].data)
    else:

        head = HDUList[0].header
        dat = HDUList[0].data

        # Get data axes
        nx = dat.shape[2]
        ny = dat.shape[1]
        nr = dat.shape[0]

    # Check nint
    if "NINT" in head:
        if symLinkParam['dmsConvert'] == True:
            int_start_num = head[
                'INTSTART']  ## starting integration number for the segment
            ## use the value packed into the segment/file (not total)
            nint = head['INTEND'] - int_start_num + 1
            nint_orig = head[
                'NINTS']  ## original number of integrations in exposure
        else:
            nint = head["NINT"]
            if nint == 1:  # not a packed data cube
                print("NINT is {}; {} is not a packed data cube.".format(
                    nint, fitsFile))
                print("Going to create just one int file")
            int_start_num = 1
            nint_orig = nint
    else:
        print("Keyword NINT not found; can't split data up.")
        return

    # check ngroup
    if "NGROUP" not in head:
        print('Keyword NGROUP not found.  Assuming NGROUP = NREAD/NINT')
        print('NREAD: {}'.format(nr))
        print('NINT:  {}'.format(nint))
        if np.mod(nr, nint) != 0:
            print("NREAD is not an even multiple of NINT. Can''t proceed")
            return
        else:
            ngroup = nr // nint
            print('Setting NGROUP to {}'.format(ngroup))
    else:  # ngroup found make sure number works
        ngroup = head["NGROUP"]
        if nr != ngroup * nint:
            print("Counting doesn't work out. NREAD must equal NGROUP*NINT")
            print('NINT:  {}'.format(nint))
            print('NGROUP: {}'.format(ngroup))
            print('NGROUP*NINT: {}'.format(ngroup * nint))
            print('NREAD: {}'.format(nr))
            return

    # start your engines.
    BaseName = os.path.splitext(fitsFile)[0]
    print(fitsFile)
    print(BaseName)
    z0 = 0
    z1 = z0 + ngroup - 1
    for i in np.arange(nint):  # Loop over nints
        if np.mod(i, 40) == 0:
            print("Breaking int {} of {}".format(i, nint))

        FullHeader = deepcopy(head)

        tmpStr = "{:05d}".format(i + int_start_num - 1)

        if symLinkParam['dmsConvert'] == True:
            _thisint = flip_data(dat[i], head)
        else:
            # Get this block on nint
            if nint == 1:
                _thisint = dat
            else:
                _thisint = dat[z0:z1 + 1]
        _thisheader = FullHeader
        _thisfile = BaseName + '_I' + tmpStr + '.fits'
        _thisheader.insert(
            "NINT", ("ON_NINT", i + int_start_num, "This is INT of TOT_NINT"),
            after=True)
        _thisheader.insert("ON_NINT",
                           ("TOT_NINT", nint_orig,
                            "Total number of NINT in original exposure"),
                           after=True)
        if symLinkParam['dmsConvert'] == True:
            ## keep track of yet another NINT, which is for the total exposure
            _thisheader.insert("TOT_NINT",
                               ("SEGNINT", nint,
                                "Total number of NINT in the segment or file"),
                               after=True)
            ## grab the integration times from the INT_TIMES table
            _thisheader.insert("TIME-OBS",
                               ("BJDMID", times_tab[i]['int_mid_BJD_TDB'],
                                "Mid-Exposure time (MBJD_TDB)"),
                               after=True)
            _thisheader.insert("BJDMID",
                               ("MJDSTART", times_tab[i]['int_start_MJD_UTC'],
                                "Exposure start time (MJD_UTC)"),
                               after=True)
            _thisheader['NINTS'] = 1  # set nint to 1
            _thisheader.insert("NINTS", ("NINT", 1, "Number of ints"))
        else:
            _thisheader['NINT'] = 1  # set nint to 1

        _thisheader[
            "COMMENT"] = 'Extracted from a multi-integration file by ParseIntegration.pro'
        outHDU = fits.PrimaryHDU(_thisint, header=_thisheader)
        if os.path.exists(_thisfile):
            print("Found {}. Not overwriting".format(_thisfile))
        else:
            outHDU.writeto(_thisfile)
        z0 += ngroup
        z1 = z0 + ngroup - 1
        del outHDU

    HDUList.close()
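A hypothetical invocation; symLinkParam['dmsConvert'] is module-level state configured elsewhere, and the file name is a placeholder. Each integration is written to its own file, e.g. my_nircam_exposure_I00000.fits:

breaknint('my_nircam_exposure.fits')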