Example #1
    def _get_fits_header(self, result, options):
        """Populate FITS Header keywords"""
        super(WFIRSTInstrument, self)._get_fits_header(result, options)
        pupil_hdr = fits.getheader(self.pupil)
        apodizer_hdr = fits.getheader(self._apodizer_fname)
        fpm_hdr = fits.getheader(self._fpm_fname)
        lyotstop_hdr = fits.getheader(self._lyotstop_fname)

        result[0].header.set('MODE', self.mode, comment='Observing mode')
        result[0].header.set('CAMERA', self.camera, comment='Imager or IFS')
        result[0].header.set('APODIZER', self.apodizer, comment='Apodizer')
        result[0].header.set('APODTRAN', os.path.basename(self._apodizer_fname),
                             comment='Apodizer transmission')
        result[0].header.set('PUPLSCAL', apodizer_hdr['PUPLSCAL'],
                             comment='Apodizer pixel scale in m/pixel')
        result[0].header.set('PUPLDIAM', apodizer_hdr['PUPLDIAM'],
                             comment='Full apodizer array size, incl padding.')
        result[0].header.set('FPM', self.fpm, comment='Focal plane mask')
        result[0].header.set('FPMTRAN', os.path.basename(self._fpm_fname),
                             comment='FPM transmission')
        result[0].header.set('FPMSCAL', fpm_hdr['PIXSCALE'], comment='FPM spatial sampling, arcsec/pix')
        result[0].header.set('LYOTSTOP', self.lyotstop, comment='Lyot stop')
        result[0].header.set('LSTRAN', os.path.basename(self._lyotstop_fname),
                             comment='Lyot stop transmission')
        result[0].header.set('PUPLSCAL', lyotstop_hdr['PUPLSCAL'],
                             comment='Lyot stop pixel scale in m/pixel')
        result[0].header.set('PUPLDIAM', lyotstop_hdr['PUPLDIAM'],
                             comment='Lyot stop array size, incl padding.')
Example #2
def load_uvot_images(galaxy, band, imdir=''):
    """ugh:
    :returns hdr:

    :returns images:
        List of images  [cps, exposure, sensitivity]

    :returns masks:
        Dictionary of mask images, keyed by strings:
        ['bkg', 'reg', 'reg1', 'reg5', 'reg20']
    """
    imroot = os.path.join(imdir, galaxy, galaxy)
    info = {'root': imroot, 'band': band}
    
    names = ['{root}_t{band}.fits', '{root}_t{band}_ex.fits', '{root}_t{band}_lss.fits']
    #print(names[0].format(**info))
    try:
        hdr = pyfits.getheader(names[0].format(**info), 1)
    except IndexError:
        # fall back to the primary header if extension 1 is missing
        hdr = pyfits.getheader(names[0].format(**info), 0)
    images = [pyfits.getdata(n.format(**info)) for n in names]

    masknames = ['bkg', 'reg', 'reg1', 'reg5', 'reg20']
    masks = [pyfits.getdata('{root}_{mask}.fits'.format(root=info['root'], mask=n))
            for n in masknames]

    return hdr, images, dict(zip(masknames, masks))
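A minimal usage sketch; the galaxy name, band code, and directory are placeholders, and it assumes the '{root}_t{band}*.fits' layout hard-coded above:

hdr, images, masks = load_uvot_images('NGC0024', 'w1', imdir='data/uvot')  # placeholder names
cps, exposure, sensitivity = images      # unpack in the documented order
print(hdr.get('TELESCOP'), cps.shape, sorted(masks.keys()))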
Example #3
def logfile_sci(logfile, imlist, SaveSteps=False):
    '''Add science images and info to logfile
        
       logfile  - Log file name
       imlist   - List of science files reduced 
     SaveSteps  - Set to True if each step in the reduction is saved
                  as a new file
    '''
    imlist.sort()
    nfiles = len(imlist)
    log = open(logfile, 'a')
    log.write('# Science Images\n')
    if SaveSteps:   
        # if user is saving interim reduction steps to new files, 
        # include the extensions for each calibration type in the header
        log.write('#           *.fits - raw image \n')
        log.write('#        *.bs.fits - bias-subtracted image \n')
        log.write('#     *.bs.ff.fits - bias-subtracted and flat-fielded \n')
    log.write('# Image  Object  Exptime  Filter\n')
    # for each file, print the image name, object type, exptime, and filter
    for i in range(nfiles):
        imname = os.path.splitext(os.path.basename(imlist[i]))[0]
        hdr = fits.getheader(imlist[i])
        obj = hdr['OBJECT']
        exptime = hdr['EXPTIME']
        filt = hdr['FILTER']
        log.write('%s    %s   %.3f   %s \n' % (imname, obj, exptime, filt))
    log.write('\n')
    log.write('\n')
    log.write('# Comments: \n')
    log.write('\n')
    log.close()
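A short usage sketch; the log name and file pattern are placeholders, assuming the reduced frames follow the *.bs.ff.fits naming described above:

import glob

sci_files = glob.glob('*.bs.ff.fits')              # placeholder pattern
logfile_sci('night_2014-05-01.log', sci_files, SaveSteps=True)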
Example #4
def hdr_get(gal,data_mode='12m'):
    if isinstance(gal,Galaxy):
        name = gal.name.lower()
    elif isinstance(gal,str):
        name = gal.lower()
    else:
        raise ValueError("'gal' must be a str or galaxy!")
    if data_mode == '7m':
        conbeam = None
        print('WARNING: SFR maps come in 12m sizes only.')  # (!!!) What about all the new 15" maps?
        print('WARNING: Convolution forcibly disabled.')
    elif data_mode in ['12m','12m+7m']:
        data_mode = '12m+7m'
    
    hdr = None
    hdr_found = False
    if name=='m33':
        for filename in [
                'notphangsdata/M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.peakvels.fits']:
            if os.path.isfile(filename):
                hdr = fits.getheader(filename)
                hdr_found = True
    else:
        for filename in [
                'phangsdata/'+name+'_co21_'+data_mode+'+tp_mom0.fits',
                'phangsdata/'+name+'_co21_'+data_mode+'+tp_mom1.fits',
                'phangsdata/'+name+'_co21_'+data_mode+'+tp_tpeak.fits']:
            if os.path.isfile(filename):
                hdr = fits.getheader(filename)
                hdr_found = True
    if not hdr_found:
        print('WARNING: No header was found!')
        hdr = None
    return hdr
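A hedged usage sketch; it assumes the 'phangsdata/<name>_co21_...' file layout hard-coded above, and the galaxy name is a placeholder:

hdr = hdr_get('ngc1234', data_mode='12m')          # placeholder galaxy name
if hdr is not None:
    print(hdr['NAXIS1'], hdr['NAXIS2'])            # image dimensions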
Example #5
def get_file_list():
    lista = sorted(glob.glob("0*.fits")) #will need work
    try:
        header0 = fits.getheader(lista[0])
        this_night = night(header0["DATE"])
        for i in lista:
            #print("./" +i)
            header  = fits.getheader(i)
            obstype = header["OBSTYPE"]
            if obstype == "BIAS":
                this_night.add_bias(i)
            elif obstype == "FLAT":
                grating = header["GRATING"]
                if "NO GRATING" in grating:
                    this_night.add_flat_ng(i)
                elif grating == "KOSI_600":
                    this_night.add_flat_wg(i)
                else:
                    print("Unrecognized grating")
            elif obstype == "OBJECT":
                this_night.add_sci(i)
            elif obstype == "COMP":
                this_night.add_lamp(i)
        return this_night
    except IOError as err:
        if str(err) == "Empty or corrupt FITS file":
            print("Raised an IOError as ",err)
            os.system("rem.csh")
            sys.exit("Please run this script again")
        else:
            print(err)
            sys.exit("Please correct the errors and try again")
Example #6
def crossmatchtwofiles(img1, img2, radius=3):
    ''' Cross-match two images:
        run SExtractor, convert the pixel positions of the sources to sky
        coordinates, and cross-match the two source lists.
        The output is a dictionary with the objects in common.
    '''

    from numpy import array, argmin, min, sqrt
    import agnkey

    hd1 = pyfits.getheader(img1)
    hd2 = pyfits.getheader(img2)
    wcs1 = pywcs.WCS(hd1)
    wcs2 = pywcs.WCS(hd2)

    xpix1, ypix1, fw1, cl1, cm1, ell1, bkg1, fl1 = agnkey.agnastrodef.sextractor(img1)
    xpix2, ypix2, fw2, cl2, cm2, ell2, bkg2, fl2 = agnkey.agnastrodef.sextractor(img2)
    xpix1, ypix1, xpix2, ypix2 = array(xpix1, float), array(ypix1, float), array(xpix2, float), array(ypix2, float)

    bb = wcs1.wcs_pix2sky(zip(xpix1, ypix1), 1)  # transform pixel in coordinate
    xra1, xdec1 = zip(*bb)
    bb = wcs2.wcs_pix2sky(zip(xpix2, ypix2), 1)  # transform pixel in coordinate
    xra2, xdec2 = zip(*bb)

    xra1, xdec1, xra2, xdec2 = array(xra1, float), array(xdec1, float), array(xra2, float), array(xdec2, float)
    distvec, pos1, pos2 = agnkey.agnastrodef.crossmatch(xra1, xdec1, xra2, xdec2, radius)
    matches = {'ra1': xra1[pos1], 'dec1': xdec1[pos1], 'ra2': xra2[pos2], 'dec2': xdec2[pos2],
               'xpix1': xpix1[pos1], 'ypix1': ypix1[pos1], 'xpix2': xpix2[pos2], 'ypix2': ypix2[pos2]}

    out = next(tempfile._get_candidate_names())+'.list'
    np.savetxt(out, zip(xpix1[pos1], ypix1[pos1]), fmt='%10.10s\t%10.10s')
    return out, matches
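A usage sketch, assuming two astrometrically calibrated images and the agnkey package on the path (file names are placeholders):

listfile, matches = crossmatchtwofiles('epoch1.fits', 'epoch2.fits', radius=3)
print(len(matches['ra1']), 'sources in common; pixel list written to', listfile)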
Example #7
File: k2io.py  Project: OxES/k2sc
    def read(cls, fname, sid, **kwargs):
        ftype = kwargs.get('type', 'pdc').lower()
        assert ftype in cls.allowed_types, 'Flux type must be either `sap` or `pdc`'
        fkey = cls.fkeys[ftype]

        try:
            epic = int(re.findall('ktwo([0-9]+)-c', basename(fname))[0])
        except IndexError:
            epic = int(re.findall('C([0-9]+)_smear', basename(fname))[0][2:]) # for smear
        data  = pf.getdata(fname, 1)
        phead = pf.getheader(fname, 0)
        dhead = pf.getheader(fname, 1)

        for h in (phead, dhead):
            h.remove('CHECKSUM')
        for k in 'CREATOR PROCVER FILEVER TIMVERSN'.split():
            phead.remove(k)

        return K2Data(epic,
                      time    = data['time'],
                      cadence = data['cadenceno'],
                      quality = data['sap_quality'],
                      fluxes  = data[fkey],
                      errors  = data[fkey+'_err'],
                      x       = data['pos_corr1'],
                      y       = data['pos_corr2'],
                      primary_header = phead,
                      data_header = dhead,
                      campaign = phead['campaign'])
Example #8

def image_limits(image, wispfield):
    '''Find the pixel limits of the region of the combined image
       that is covered by all of the input images.
    '''
    # read in combined image
    hdr = fits.getheader(image)
    hdr_wcs = pywcs.WCS(hdr)
    # list of images that were combined
    ims = hdr['IMCMB*']
    Nim = len(ims)
    xx = np.zeros((Nim,2), dtype=int)
    yy = np.zeros((Nim,2), dtype=int)
    for i in range(Nim):
        if os.path.splitext(ims[i])[1] != '.fits':
            ims[i] = ims[i] + '.fits'
        hd = fits.getheader(os.path.join(wispfield,ims[i]))
        nx1 = hd['NAXIS1']
        nx2 = hd['NAXIS2']
        hd_wcs = pywcs.WCS(hd)
        # find WCS coordinates of input image corners
        coo = hd_wcs.wcs_pix2sky([[1,1],[1,nx1-1],[nx2-1,1],[nx2-1,nx1-1]],1)
        # find corresponding pixel coords in combined image
        pix = hdr_wcs.wcs_sky2pix(coo,1)
        xx[i,0] = pix[0][1]
        xx[i,1] = pix[1][1]
        yy[i,0] = pix[0][0]
        yy[i,1] = pix[2][0]

    xmin,xmax = np.max(xx[:,0]), np.min(xx[:,1])
    ymin,ymax = np.max(yy[:,0]), np.min(yy[:,1])
    return xmin,xmax,ymin,ymax
Example #9

def filter_combo(Palim, UVISims):
    '''Return the filter combination that should be used for 
       determination of the color term.
    '''
    # get Palomar SDSS filters
    hdr = fits.getheader(Palim[0])
    Pal_filt = hdr['FILTER'].rstrip("'")
    
    # get UVIS filters
    UVIS_filt = []
    for im in UVISims:
        hdr = fits.getheader(im)
        UVIS_filt.append(hdr['FILTER'])

    # are both UVIS filters available?
    if len(UVIS_filt) > 1:
        print '\nNOTE: Both UVIS filters -- %s -- are available for ' \
              'this field\n' % UVIS_filt
   
    # possible filter combinations for photometric calibration
    if Pal_filt == 'g':
        use_filt = [x for x in UVIS_filt if x == 'F600LP' or x == 'F814W']
        use_im = [x for x in UVISims if \
                  os.path.basename(x).split('_')[0] == 'F600LP']
    if Pal_filt == 'i':
        use_filt = [x for x in UVIS_filt if x == 'F475X' or x == 'F606W']
        use_im = [x for x in UVISims if \
                  os.path.basename(x).split('_')[0] == 'F475X']
    
    use_filt.append(Pal_filt)
    use_filt.sort()
    return use_filt, use_im
Example #10

def GetImage(image, mask):
    Passed = 0
    try:
        imageData = fits.getdata(reducedpath+date+'.{:0>3}.reduced.fits'.format(image))
        hdr = fits.getheader(reducedpath+date+'.{:0>3}.reduced.fits'.format(image))
        w = WCS(reducedpath+date+'.{:0>3}.reduced.fits'.format(image))
        Passed = 1
    except IOError:
        print('Trying a different file name.')
        Passed = 0
    if Passed == 0:
        try:
            imageData = fits.getdata(reducedpath+date+'.f{:0>3}.reduced.fits'.format(image))
            hdr = fits.getheader(reducedpath+date+'.f{:0>3}.reduced.fits'.format(image))
            w = WCS(reducedpath+date+'.f{:0>3}.reduced.fits'.format(image))
        except IOError:
            raise OSError('We do not know that filename: %s' % (reducedpath+date+'.f{:0>3}.reduced.fits'.format(image)))

    # Parse the header to get the object name
    ObjName = hdr['OBJECT'].split(',')[0].split(' ')[0]
    band    = hdr['FILTER2']

    # Create the masked Image Data
    imageDataM = np.ma.array(imageData, mask=mask)

    # Computed the background levels
    mean, median, std = stats.sigma_clipped_stats(imageData, mask=mask, sigma=3.0)
    print('mean', 'median', 'std', 'BACKGROUND')
    print(mean, median, std)

    # Remove the background
    imageDataRed = imageDataM - median
    return imageDataRed, median, ObjName, band, std, hdr, w
Example #11
def crossmatchtwofiles(img1, img2, radius=3):
    ''' Cross-match two images:
        run SExtractor, convert the pixel positions of the sources to sky
        coordinates, and cross-match the two source lists.
        The output is a dictionary with the objects in common.
    '''
    import lsc
    from astropy.wcs import WCS
    from numpy import array, argmin, min, sqrt

    hd1 = fits.getheader(img1)
    hd2 = fits.getheader(img2)
    wcs1 = WCS(hd1)
    wcs2 = WCS(hd2)

    xpix1, ypix1, fw1, cl1, cm1, ell1, bkg1, fl1 = lsc.lscastrodef.sextractor(img1)
    xpix2, ypix2, fw2, cl2, cm2, ell2, bkg2, fl2 = lsc.lscastrodef.sextractor(img2)
    xpix1, ypix1, xpix2, ypix2 = array(xpix1, float), array(ypix1, float), array(xpix2, float), array(ypix2, float)

    bb = wcs1.wcs_pix2world(zip(xpix1, ypix1), 1)  #   transform pixel in coordinate
    xra1, xdec1 = zip(*bb)
    bb = wcs2.wcs_pix2world(zip(xpix2, ypix2), 1)  #   transform pixel in coordinate
    xra2, xdec2 = zip(*bb)

    xra1, xdec1, xra2, xdec2 = array(xra1, float), array(xdec1, float), array(xra2, float), array(xdec2, float)
    distvec, pos1, pos2 = lsc.lscastrodef.crossmatch(xra1, xdec1, xra2, xdec2, radius)
    matches = {'ra1': xra1[pos1], 'dec1': xdec1[pos1], 'ra2': xra2[pos2], 'dec2': xdec2[pos2],
               'xpix1': xpix1[pos1], 'ypix1': ypix1[pos1], 'xpix2': xpix2[pos2], 'ypix2': ypix2[pos2]}
    np.savetxt('substamplist', zip(xpix1[pos1], ypix1[pos1]), fmt='%10.10s\t%10.10s')
    return 'substamplist', matches
Example #12
def extract():
    if os.path.isdir('database'):
        shutil.rmtree('database')

    scilist = glob.glob('AGN*.fits')
    for img in scilist:
        hds = pyfits.getheader(img)
        _gain = hds['HIERARCH ESO DET OUT1 GAIN']
        _ron = hds['HIERARCH ESO DET OUT1 RON']
        name = img.rsplit('.fits',1)[0]+'_EX'
        iraf.specred.apall(img, output=name, interactive='no', find='no', recenter='no',
                           resize='no', edit='no', trace='yes', fittrace='no', extract='yes',
                           extras='yes', review='no', line=600, nsum=20, lower=-5., upper=5.,
                           b_function='chebyshev', b_order=1, b_sample='-20:-10,10:20',
                           b_naverage=3., b_niterate=0, b_low_reject=3., b_high_reject=3.,
                           b_grow=0, width=10., radius=10., nfind=1, background='fit',
                           clean='yes', readnoise=_ron, gain=_gain)


    stdlist = glob.glob('STD*.fits')
    lamplist = glob.glob('LAMP*.fits')
    sllist = stdlist + lamplist
    sci = scilist[0]
    ref = sci.rsplit('.fits',1)[0]
    for img in sllist:
        hdl = pyfits.getheader(img)
        _gain = hdl['HIERARCH ESO DET OUT1 GAIN']
        _ron = hdl['HIERARCH ESO DET OUT1 RON']
        dsum = 10
        name = img.rsplit('.fits',1)[0]+'_EX'
        iraf.apsum(img, output=name, references=ref, nsum=dsum, interactive='no', find='no',
                   edit='no', trace='no', fittrace='no', extract='yes', extras='yes',
                   review='no', clean='yes', readnoise=_ron, gain=_gain)
Example #13
def get_header_info(pipeline, cat_name, frame):
    """
    Get the exposure time, airmass, and gain from a DECam fits header
    
    Parameters
    ----------
    pipeline: :class:`.Pipeline`
        Object that contains parameters about a decam pipeline
    cat_name: str
        Name of a catalog that contains a list of sources found using decamtoyz
    frame: int
        CCD number of the header to load
    
    Returns
    -------
        exptime: float
            Exposure time of the image
        airmass: float
            Airmass (sec(zenith distance))
        gain: float
            Gain in electrons/adu
    """
    from astropyp import index
    from astropy.io import fits
    import numpy as np
    
    expnum = os.path.basename(cat_name).split('-')[0]
    sql = "select * from decam_files where EXPNUM={0} and PRODTYPE='image'".format(int(expnum))
    files = index.query(sql, pipeline.idx_connect_str)
    header = fits.getheader(os.path.join(pipeline.paths['decam'], files.iloc[0].filename), ext=0)
    exptime = header['exptime']
    airmass = 1/np.cos(np.radians(header['ZD']))
    header = fits.getheader(os.path.join(pipeline.paths['decam'], files.iloc[0].filename), ext=frame)
    gain = header['arawgain']
    return exptime, airmass, gain
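The airmass here is the plane-parallel approximation sec(ZD). A standalone check of that one line, with a made-up zenith distance:

import numpy as np

zd = 30.0                                # zenith distance in degrees (made up)
airmass = 1 / np.cos(np.radians(zd))     # sec(ZD), as in the function above
print(round(airmass, 4))                 # 1.1547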
Example #14
def test_adding_overscan_apogee_u9(make_overscan_test_files):
    original_dir = getcwd()

    apogee = ApogeeAltaU9()
    print(getcwd())
    oscan_dir, has_oscan, has_no_oscan = make_overscan_test_files
    print(getcwd())

    chdir(path.join(_test_dir, oscan_dir))
    # first, does requesting *not* adding overscan actually leave it alone?
    ph.patch_headers(dir='.', new_file_ext='', overwrite=True, purge_bad=False,
                     add_time=False, add_apparent_pos=False,
                     add_overscan=False, fix_imagetype=False)
    header_no_oscan = fits.getheader(has_no_oscan)
    assert 'biassec' not in header_no_oscan
    assert 'trimsec' not in header_no_oscan

    # Now add overscan
    ph.patch_headers(dir='.', new_file_ext='', overwrite=True, purge_bad=False,
                     add_time=False, add_apparent_pos=False,
                     add_overscan=True, fix_imagetype=False)
    print(_test_dir)
    header_no_oscan = fits.getheader(has_no_oscan)
    # This image had no overscan, so should be missing the relevant keywords.
    assert 'biassec' not in header_no_oscan
    assert 'trimsec' not in header_no_oscan
    header_yes_oscan = fits.getheader(has_oscan)
    # This one has overscan, so should include both of the overscan keywords.
    assert header_yes_oscan['biassec'] == apogee.useful_overscan
    assert header_yes_oscan['trimsec'] == apogee.trim_region
    print(getcwd())
    chdir(original_dir)
    print(getcwd())
Example #15
def load(filepath, pos_filepath):
    """
    :param filepath: fits image path
    :param pos_filepath: fits file holding the position information
    :return: an Image
    """

    # load image data information
    data = pyfits.getdata(filepath, hdu=0)
    primary = Header(pyfits.getheader(filepath, 0))
    headers = [primary]
    extcount = int(primary.get("NEXTEND", 0))

    for idx in range(1, extcount + 1):  # NEXTEND counts extensions 1..N
        ext = Header(pyfits.getheader(filepath, idx))
        headers.append(ext)

    # load position information
    pos_primary = Header(pyfits.getheader(pos_filepath, 0))
    pos_headers = [pos_primary]
    pos_extcount = int(pos_primary.get("NEXTEND", 0))

    for idx in range(1, pos_extcount + 1):  # NEXTEND counts extensions 1..N
        ext = Header(pyfits.getheader(pos_filepath, idx))
        pos_headers.append(ext)

    return Image(array(data), headers, pos_headers)
Example #16
    def test_GetHeaderConvienceFunction(self, filename, ext, naxis1, naxis2):
        """Test the getheader convience function in both the fits and
        stpyfits namespace."""

        if ext is None:
            hd = stpyfits.getheader(self.data(filename))
            hd1 = fits.getheader(self.data(filename))
        else:
            hd = stpyfits.getheader(self.data(filename), ext)
            hd1 = fits.getheader(self.data(filename), ext)

        assert hd['NAXIS'] == 2
        assert hd1['NAXIS'] == 0
        assert hd['NAXIS1'] == naxis1
        assert hd['NAXIS2'] == naxis2

        for k in ('NAXIS1', 'NAXIS2'):
            with pytest.raises(KeyError):
                hd1[k]
        for k in ('NPIX1', 'NPIX2'):
            with pytest.raises(KeyError):
                hd[k]

        assert hd1['NPIX1'] == naxis1
        assert hd1['NPIX2'] == naxis2
Example #17

    def setup(self, SE_only, no_conv):
        """Return lists of all images and filters used in this Par.
           We will use the unrotated images for use with a single psf
           Image filenames:
             ParXXX/DATA/UVIS/IRtoUVIS/FYYY_UVIS_sci.fits
             ParXXX/DATA/UVIS/IRtoUVIS/FYYY_UVIS_rms.fits
        """
        images = glob(os.path.join(self.imdir, 'F*_UVIS_sci.fits'))

        # dictionary of zero points
        zps = self.get_zp(images[0])

        # build table
        t = Table(data=None, 
                  names=['filt','image','convim','rms','wht','exptime','zp'],
                  dtype=['S10', 'S60', 'S60', 'S60', 'S60', float, float])
        for image in images:
            filt = fits.getheader(image)['FILTER']

            # weight map
            wht = image.split('_sci.fits')[0] + '_wht.fits'

            # clean image for convolution
            tmp = os.path.splitext(image)[0] + '_cln.fits'
            image_cln = os.path.join(self.outdir, os.path.basename(tmp))
            if SE_only is False:
                print 'Cleaning %s' % os.path.basename(image)
                if no_conv:
                    clean_image(image, image_cln, cln_by_wht=False, whtim=wht)
                else:
                    clean_image(image, image_cln, cln_by_wht=True, whtim=wht)
            
            # names of convolved images
            if filt == self.reddest_filt:
                convim = image_cln
            else:
                check = re.search(r'\d+', self.reddest_filt)
                rf = check.group(0)
                convim = os.path.join(self.outdir,'%s_convto%s.fits'%(filt,rf))

            # replace zeros with 1.e10 in rms analysis maps
            rms0 = image.split('_sci.fits')[0] + '_rms.fits'
            tmp = os.path.splitext(rms0)[0] + '_analysis.fits'
            rms_analysis = os.path.join(self.outdir, os.path.basename(tmp))
            self.fix_rms_map(rms0, rms_analysis, value=1.e10, rmstype='analysis')

            # for detection image, create detection RMS map as well
            if filt == self.detect_filt:
                tmp2 = os.path.splitext(rms0)[0] + '_detection.fits'
                rms_detect = os.path.join(self.outdir, os.path.basename(tmp2))
                self.fix_rms_map(rms0, rms_detect, value=0.01, 
                                 rmstype='detection', whtim=wht)
            
            exptime = fits.getheader(image)['EXPTIME']
            zp = zps[filt]

            t.add_row([filt, image_cln, convim, rms_analysis, wht, exptime, zp])
        # set detection RMS map
        self.detect_rms = rms_detect
        return t
Example #18
def time_calibration(input_file):
    """
    Obtain the calibration for time (hjd) by pyraf and the airmass for each image. Include in the header all information.
    """
    original_path = os.getcwd()
    save_path = input_file['save_path']
    #change to save data reduction directory
    os.chdir(save_path)
    print '\n Reading the list of images ....\n'
    planet = input_file['exoplanet'] #set exoplanet name
    images = sorted(glob.glob('AB'+planet+'*.fits'))
    print images
    #include the RA, DEC, and epoch of the exoplanet
    RA,DEC,epoch = input_file['RA'],input_file['DEC'],input_file['epoch']
    #obtain ST JD using iraf task and introduce in the header
    for i in range(len(images)):
        hdr = fits.getheader(images[i])
        if int(split(hdr['UT'],':')[0]) < int(hdr['timezone']):
            new_date = use.yesterday(hdr['date-obs'])
            #print images[i], new_date
        else:
            new_date = hdr['date-obs']
        year,month,day = split(new_date,'-')
        iraf.asttimes(year=year,month=month,day=day,time=hdr['loctime'],obs=input_file['observatory'])
        JD = iraf.asttimes.jd #obtain julian date
        LMST = iraf.asttimes.lmst #obtain the sidereal time
        LMST = use.sexagesimal_format(LMST) #convert the sidereal time to sexagesimal format
        iraf.hedit(images[i],'ST',LMST,add='yes',verify='no',show='no',update='yes') #create the ST keyword in the header
        iraf.ccdhedit(images[i],'LMST',LMST,type='string') #include the mean sidereal time in the header
        iraf.ccdhedit(images[i],'JD',JD,type='string') #include the julian date in the header
        #include RA, and DEC of the object in your header
        iraf.ccdhedit(images[i],"RA",RA,type="string") #include right ascention in the header
        iraf.ccdhedit(images[i],"DEC",DEC,type="string")  #include declination in the header
        iraf.ccdhedit(images[i],"epoch",epoch,type="string") #include epoch in the header
        # use.update_progress((i+1.)/len(images))
    print '\n Setting airmass ....\n'
    for i in range(len(images)):
        print '# ',images[i]
        #iraf.hedit(images[i],'airmass',airmass,add='yes')
        #iraf.hedit(images[i],'HJD',HJD,add='yes')
        iraf.setairmass.observatory = input_file['observatory']
        iraf.setairmass(images[i])
        iraf.setjd.time = 'ut'
        iraf.setjd(images[i])
    print '\n.... done.\n'
    #export information
    hjd, jd, airmass, st = [],[],[],[]
    for i in range(len(images)):
        hdr = fits.getheader(images[i])
        hjd.append(hdr['HJD'])
        jd.append(hdr['JD'])
        airmass.append(hdr['airmass'])
        st.append(hdr['st'])
    #saving the data
    data = DataFrame([list(hjd),list(jd),list(st),list(airmass)]).T
    data.columns = ['HJD','JD','ST','Airmass']
    data.to_csv('results_iraf_calibrations.csv')
    #change back to the working directory
    os.chdir(original_path)
    return
Example #19
def find_best_flat(flt_fits, verbose=True):
    """
    Find the most recent PFL file in $IREF for the filter used for the 
    provided FLT image.  Doesn't do any special check on USEAFTER date, just
    looks for the most-recently modified file. 
    """
    import glob
    import os.path
    import time
    
    IREF = os.environ["iref"]

    the_filter = fits.getheader(flt_fits,0).get('FILTER')
    
    pfls = glob.glob(IREF+'*pfl.fits')
    latest = 0
    best_pfl = None
    
    for pfl in pfls:
        head = fits.getheader(pfl)
        if head.get('FILTER') != the_filter:
            continue    
        
        this_created = os.path.getmtime(pfl)
        if this_created > latest:
            best_pfl = pfl
            latest = this_created
            
        if verbose:
            print '%s %s %s' %(pfl, the_filter, time.ctime(latest))
    
    return best_pfl #, the_filter, time.ctime(latest)
Example #20

    def testGetHeaderConvienceFunction(self):
        """Test the getheader convenience function in both the fits and
           stpyfits namespaces."""

        hd = stpyfits.getheader(self.data('cdva2.fits'))
        hd1 = fits.getheader(self.data('cdva2.fits'))

        assert_equal(hd['NAXIS'], 2)
        assert_equal(hd1['NAXIS'], 0)
        assert_equal(hd['NAXIS1'], 10)
        assert_equal(hd['NAXIS2'], 10)

        assert_raises(KeyError, lambda: hd1['NAXIS1'])
        assert_raises(KeyError, lambda: hd1['NAXIS2'])
        assert_raises(KeyError, lambda: hd['NPIX1'])
        assert_raises(KeyError, lambda: hd['NPIX2'])

        assert_equal(hd1['NPIX1'], 10)
        assert_equal(hd1['NPIX2'], 10)

        hd = stpyfits.getheader(self.data('o4sp040b0_raw.fits'), 2)
        hd1 = fits.getheader(self.data('o4sp040b0_raw.fits'), 2)

        assert_equal(hd['NAXIS'], 2)
        assert_equal(hd1['NAXIS'], 0)
        assert_equal(hd['NAXIS1'], 62)
        assert_equal(hd['NAXIS2'], 44)

        assert_raises(KeyError, lambda: hd1['NAXIS1'])
        assert_raises(KeyError, lambda: hd1['NAXIS2'])
        assert_raises(KeyError, lambda: hd['NPIX1'])
        assert_raises(KeyError, lambda: hd['NPIX2'])

        assert_equal(hd1['NPIX1'], 62)
        assert_equal(hd1['NPIX2'], 44)
Example #21

def SExtractorDualImage(drPath,tract,bands,patches,prefix,sexdir,dotsex,zps):
    '''
    Run SExtractor in dual image mode for each patch,band using chi2 image for detection
    '''
    os.chdir(sexdir)
    for band in bands:
        for patch in patches:
            # Move Variance extension to new file for SExtractor
            imname = drPath+'deepCoadd/'+band+'/'+tract+'/'+patch+'/'+\
                        'calexp-'+band+'-'+tract+'-'+patch[0]+patch[-1]
            varHead = fits.getheader(imname+'.fits','VARIANCE')
            maskHead = fits.getheader(imname+'.fits','MASK')
            im = fits.open(imname+'.fits')
            fits.writeto(imname+'_var.fits',im['VARIANCE'].data,varHead,clobber=True)
            fits.writeto(imname+'_mask.fits',im['MASK'].data.astype(float),maskHead,clobber=True)
            inImage = imname+'.fits[1]'
            outCat = drPath+'deepCoadd/'+band+'/'+tract+'/'+patch+'/'+band+'-'+tract+'-'+patch[0]+patch[-1]+'-chi2.cat'
            chi2Image = drPath+'deepCoadd/'+prefix+'chi2/'+tract+'/'+patch+'/'+prefix+'chi2-'+tract+'-'+patch+'.fits'
            # Run SExtractor
            os.system('sex '+chi2Image+','+inImage+' -c '+dotsex+' -CATALOG_NAME '+outCat+' -WEIGHT_IMAGE None,'+\
                        imname+'_var.fits -WEIGHT_TYPE NONE,MAP_VAR -MAG_ZEROPOINT '+str(zps[bands.index(band)]))
            # Add flags to catalog
            os.system('./venice -m '+imname+'_mask.fits -cat '+outCat+ ' -f all -xcol 2 -ycol 3 -o '+\
                    drPath+'deepCoadd/'+band+'/'+tract+'/'+patch+'/'+band+'-'+tract+'-'+patch[0]+patch[-1]+'-chi2-flags.cat')
            
            os.system('rm '+imname+'_var.fits')
            os.system('rm '+imname+'_mask.fits')
Example #22
def marxpars_from_asol(conf, asolfile, evt2file):
    '''Set MARX parameters from asol and evt file.

    This function parses the header of a fits file and uses the
    information in the header to set as many marx parameters as possible.

    Parameters
    ----------
    conf : `~ConfigParser.ConfigParser` instance
        The configuration file contains the initialization code for CIAO.

    asolfile : string
        Path and name of an asol file

    evt2file : string
        Path and name of an evt2 file

    Returns
    -------
    marx_pars : dict
        Dictionary with marx parameters as far as they can be extracted from
        the data in the asol file.
    '''
    asol = fits.getheader(asolfile, 1)
    evt = fits.getheader(evt2file, 1)

    marx_pars = OrderedDict()
    marx_pars['RA_Nom'] = asol['RA_NOM']
    marx_pars['Dec_Nom'] = asol['DEC_NOM']
    marx_pars['Roll_Nom'] = asol['ROLL_NOM']
    marx_pars['GratingType'] = asol['GRATING']
    marx_pars['ExposureTime'] = asol['TSTOP'] - asol['TSTART']
    marx_pars['DitherModel'] = 'FILE'
    marx_pars['DitherFile'] = asolfile
    marx_pars['TStart'] = asol['TSTART']

    # ACIS Exposure time (might vary for sub-array read-out)
    if evt['INSTRUME'][0:4] == 'ACIS':
        marx_pars['ACIS_Exposure_Time'] = evt['EXPTIME']
    # Target coordinates
    # ra, dec = target_coos_from_asol(asolfile)
    marx_pars['SourceRA'] = evt['RA_TARG']
    marx_pars['SourceDEC'] = evt['DEC_TARG']

    # DetectorType
    det = detectorfromkeyword(evt['DETNAM'])
    if det != '':
        marx_pars['DetectorType'] = det
        # Find offsets from nominal pointing
        aimpts = Table.read(os.path.join(conf.get('marx', 'path'), 'share', 'marx', 'data', 'caldb', 'telD1999-07-23aimptsN0002.fits'))
        det2aimpts = {'ACIS-I': 'AI2', 'ACIS-S': 'AS1', 'HRC-I': 'HI1', 'HRC-S': 'HS1'}
        aimp = np.array([n.strip() for n in aimpts['AIMPOINT_NAME']])
        ind = aimp == det2aimpts[det]
        marx_pars['DetOffsetX'] = evt['SIM_X'] - aimpts[ind]['AIMPOINT'][0][0]
        # No Y offset possible
        marx_pars['DetOffsetZ'] = evt['SIM_Z'] - aimpts[ind]['AIMPOINT'][0][2]
    else:
        warn('detector {0} not understood. No DetectorType or aimpoint offset set.'.format(evt['DETNAM']))
    return marx_pars
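A sketch of how the returned dictionary might be consumed; the config and file names are placeholders, not part of the example above:

from ConfigParser import ConfigParser    # Python 2, matching the example

conf = ConfigParser()
conf.read('marx.cfg')                    # placeholder config with a [marx] section
pars = marxpars_from_asol(conf, 'obs_asol1.fits', 'obs_evt2.fits')
for key, val in pars.items():
    print '{0}={1}'.format(key, val)     # e.g. to build a marx command line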
Example #23
def OutputFitsFileExtensions(column_dicts, template, outfilename, mode="append", headers_info=[], primary_header={}):
    """
    Function to output a fits file.

    column_dicts is a dictionary (or list of dictionaries) where each key is
       the name of a column and the value is a np array with the data, e.g.
       the wavelength or flux at each pixel.
    template is the filename of the template fits file. The header will
       be taken from this file and used as the main header.
    mode determines how the output file is made: "append" adds a fits
       extension to the existing file (and then saves it as outfilename),
       while "new" creates a new fits file.
    headers_info takes a list of lists, one per column dictionary. Each
       sub-list should have size 2, where the first element is the name of
       the new keyword and the second element is the corresponding value.
       A 3rd element may be added as a comment.
    primary_header takes a dictionary with keywords to insert into the
       primary fits header (and not each extension).
    """


    if not isinstance(column_dicts, list):
        column_dicts = [column_dicts, ]
    if len(headers_info) < len(column_dicts):
        for i in range(len(column_dicts) - len(headers_info)):
            headers_info.append([])

    if mode == "append":
        hdulist = pyfits.open(template)
    elif mode == "new":
        header = pyfits.getheader(template)
        pri_hdu = pyfits.PrimaryHDU(header=header)
        hdulist = pyfits.HDUList([pri_hdu, ])

    if len(primary_header.keys()) > 0:
        for key in primary_header:
            hdulist[0].header[key] = primary_header[key]

    for i in range(len(column_dicts)):
        column_dict = column_dicts[i]
        header_info = headers_info[i]
        columns = []
        for key in column_dict.keys():
            columns.append(pyfits.Column(name=key, format="D", array=column_dict[key]))
        cols = pyfits.ColDefs(columns)
        tablehdu = pyfits.BinTableHDU.from_columns(cols)

        # Add keywords to extension header
        num_keywords = len(header_info)
        header = tablehdu.header
        for i in range(num_keywords):
            info = header_info[i]
            if len(info) > 2:
                header.set(info[0], info[1], info[2])
            elif len(info) == 2:
                header.set(info[0], info[1])

        hdulist.append(tablehdu)

    hdulist.writeto(outfilename, clobber=True, output_verify='ignore')
    hdulist.close()
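A usage sketch showing the expected shapes of column_dicts and headers_info (the template and output names are placeholders):

import numpy as np

wave = np.linspace(500.0, 510.0, 100)
flux = np.ones_like(wave)
columns = {'wavelength': wave, 'flux': flux}
extra = [['OBJECT', 'HD 12345', 'placeholder target name']]   # [key, value, comment]

OutputFitsFileExtensions(columns, 'template.fits', 'out.fits',
                         mode='new', headers_info=[extra],
                         primary_header={'ORIGIN': 'sketch'})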
Example #24
    def use_correlation(self):
        """
        Use correlation data-cube.
        """
        import numpy

        from astropy.io.fits import getdata, getheader, writeto
        from glob import glob
        from os.path import splitext
        from sys import stdout

        self.print("\n A correlation cube will be used.")
        self.print(" Looking for an existing correlation data-cube in the current folder.")
        candidates = glob("*.fits")

        corr_cube = None
        for candidate in candidates:
            hdr = getheader(candidate)
            if hdr.get('CORRFROM') == self.input_file:
                self.print(" Correlation cube to be used: %s" % candidate)
                return candidate

        if corr_cube is None:
            self.print(" Correlation cube not found. Creating a new one.")
            data = getdata(self.input_file)
            corr_cube = numpy.empty_like(data)

            x = numpy.arange(self.width)
            y = numpy.arange(self.height)
            x, y = numpy.meshgrid(x, y)
            x, y = numpy.ravel(x), numpy.ravel(y)

            for i in range(x.size):
                s = data[:, y[i], x[i]]
                s = s / s.max()  # Normalize
                s = s - s.mean()  # Remove mean to avoid triangular shape
                s = numpy.correlate(s, self.ref_s, mode='same')
                corr_cube[:, y[i], x[i]] = s

                temp = ((i + 1) * 100.00 / x.size)
                stdout.write('\r %2d%% ' % temp)
                stdout.write(self.loading[int(temp * 10 % 5)])
                stdout.flush()

            self.print(" Done.")
            corr_name = splitext(self.input_file)[0] + '--corrcube.fits'
            self.print(" Saving correlation cube to %s" % corr_name)

            corr_hdr = self.header.copy()
            corr_hdr.set('CORRFROM', self.input_file, 'Cube used for corrcube.')
            corr_hdr.set('', '', before='CORRFROM')
            corr_hdr.set('', '--- Correlation cube ---', before='CORRFROM')

            writeto(corr_name, corr_cube, corr_hdr, clobber=True)
            del corr_hdr
            del corr_cube

            return corr_name
Example #25
def reduce_all_cubes_for_map(mapname, lowhigh='high', **kwargs):
    for dataset,maps in make_apex_cubes.datasets_2014.items():
        if mapname in maps:
            date = dataset[-10:]
            both_directions = True
            try:
                make_apex_cubes.build_cube_2014(mapname,
                                                lowhigh=lowhigh,
                                                posang=[50,70],
                                                datasets=[dataset],
                                                extra_suffix='_cal{0}_lscans'.format(date),
                                                **kwargs)
            except IndexError:
                both_directions = False

            try:
                make_apex_cubes.build_cube_2014(mapname,
                                                lowhigh=lowhigh,
                                                posang=[140,160],
                                                datasets=[dataset],
                                                extra_suffix='_cal{0}_bscans'.format(date),
                                                **kwargs)
            except IndexError:
                both_directions = False

            if both_directions:
                fileb = os.path.join(outdir, 'APEX_H2CO_2014_{1}_{2}_cal{0}_bscans.fits'.format(date, mapname, lowhigh))
                filel = os.path.join(outdir, 'APEX_H2CO_2014_{1}_{2}_cal{0}_lscans.fits'.format(date, mapname, lowhigh))

                cubeb = fits.getdata(fileb)
                cubel = fits.getdata(filel)

                if cubeb.shape != cubel.shape:
                    header = FITS_tools.fits_overlap(fileb, filel)
                    hdb = fits.getheader(fileb)
                    # Add back 3rd dimension... HACK
                    for key in hdb:
                        if key[0] == 'C' and key.strip()[-1] == '3':
                            header[key] = hdb[key]

                    FITS_tools.regrid_fits_cube(fileb, outheader=header, outfilename=fileb, clobber=True)
                    FITS_tools.regrid_fits_cube(filel, outheader=header, outfilename=filel, clobber=True)

                cubeb = fits.getdata(fileb)
                cubel = fits.getdata(filel)

                if cubeb.shape != cubel.shape:
                    log.fatal("Cube shapes don't match: {0}, {1}".format(cubeb.shape,cubel.shape))
                    raise ValueError

                cube_comb = plait.plait_cube([cubeb,cubel], angles=[0, 90], scale=5)
                cube_comb_naive = (cubeb+cubel)/2.

                header = fits.getheader(fileb)
                fits.PrimaryHDU(data=cube_comb,
                                header=header).writeto(os.path.join(outdir, '{1}_{2}_cal{0}_plait.fits'.format(date, mapname, lowhigh)),
                                                       clobber=True)
Example #26
def identify_wcs1d_fits(origin, *args, **kwargs):
    # check if file can be opened with this reader
    # args[0] = filename
    return (isinstance(args[0], str) and
            os.path.splitext(args[0].lower())[1] == '.fits' and
            # check if number of axes is one
            fits.getheader(args[0])['NAXIS'] == 1 and
            # check if the CTYPE1 keyword is in the header
            'CTYPE1' in fits.getheader(args[0])
            )
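The identifier only inspects the header, so it can be tried directly on a local file (the file name below is a placeholder):

# True only for a .fits file whose primary header has NAXIS == 1 and a CTYPE1 key.
print(identify_wcs1d_fits('read', 'spectrum_1d.fits'))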
Example #27
def imsubtract( image1, image2, outfile=None, 
                clobber=False, verbose=False, debug=False):
    """
    Construct a simple subtraction: image2 - image1.  Guards against
    different sized data arrays by assuming that the lower left pixel
    (0,0) is the anchor point.  (i.e. the second image will be
    trimmed or extended if needed.)  
    """
    import os
    import exceptions

    if debug  : import pdb; pdb.set_trace()

    if outfile : 
        if os.path.isfile( outfile ) and not clobber : 
            print("%s exists. Not clobbering."%outfile)
            return( outfile )

    # read in the images
    if not os.path.isfile( image1 ) :
        raise exceptions.RuntimeError(
            "The image file %s is not valid."%image1 )
    im1head = pyfits.getheader( image1 )
    im1data = pyfits.getdata( image1 )

    if not os.path.isfile( image2 ) :
        raise exceptions.RuntimeError(
            "The image file %s is not valid."%image2 )
    im2head = pyfits.getheader( image2 )
    im2data = pyfits.getdata( image2 )

    # sometimes multidrizzle drops a pixel. Unpredictable.
    nx2,ny2 = im2data.shape
    nx1,ny1 = im1data.shape
    if nx2>nx1 or ny2>ny1 : 
        im2data = im2data[:min(nx1,nx2),:min(ny1,ny2)]
        im1data = im1data[:min(nx1,nx2),:min(ny1,ny2)]
    elif nx2<nx1 or ny2<ny1 : 
        im1data = im1data[:min(nx1,nx2),:min(ny1,ny2)]
        im2data = im2data[:min(nx1,nx2),:min(ny1,ny2)]

    diffim =  im2data - im1data
    
    if not outfile :
        return( diffim )
    else : 
        im2head["SRCIM1"] = (image1,"First source image = template for subtraction")
        im2head["SRCIM2"] = (image2,"Second source image = search epoch image")
        outdir = os.path.split( outfile )[0]
        if outdir and not os.path.isdir(outdir): 
            os.makedirs( outdir )
        pyfits.writeto( outfile, diffim, 
                        header=im2head,
                        clobber=clobber )
        return( outfile )
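A usage sketch; the two drizzled images are placeholders, and clobber=True allows overwriting an existing difference image:

diff_fn = imsubtract('template_drz.fits', 'epoch2_drz.fits',
                     outfile='diff.fits', clobber=True, verbose=True)
print(diff_fn)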
Example #28
def db_ingest(filepath, filename, force=False):
    '''Read an image header and add a row to the database'''
    global telescopeids, instrumentids
    if '-en' in filename:
        table = 'speclcoraw'
        db_to_hdrkey = speclcoraw_to_hdrkey
    else:
        table = 'photlcoraw'
        db_to_hdrkey = photlcoraw_to_hdrkey
    fileindb = lsc.mysqldef.getfromdataraw(conn, table, 'filename', filename, column2='filepath')
    if fileindb:
        filepath = fileindb[0]['filepath'] # could be marked as bad
    if not fileindb or force:
        if filename[-3:] == '.fz':
            hdr = fits.getheader(filepath + filename, 1)
        else:
            hdr = fits.getheader(filepath + filename)
        groupidcode, targetid = get_groupidcode(hdr)
        dbdict = {'filename': filename,
                  'filepath': filepath,
                  'groupidcode': groupidcode,
                  'targetid': targetid}
        for dbcol, hdrkey in db_to_hdrkey.items():
            if hdrkey in hdr and hdr[hdrkey] not in lsc.util.missingvalues:
                if hdrkey in ['RA', 'CAT-RA']:
                    dbdict[dbcol] = Angle(hdr[hdrkey], u.hourangle).to_string(u.deg, decimal=True, precision=7)
                elif hdrkey in ['DEC', 'CAT-DEC']:
                    dbdict[dbcol] = Angle(hdr[hdrkey], u.deg).to_string(decimal=True, precision=7)
                elif hdrkey == 'DATE-OBS':
                    dbdict[dbcol] = hdr['DATE-OBS'].split('T')[0]
                elif hdrkey == 'UTSTART':
                    dbdict[dbcol] = hdr['UTSTART'].split('.')[0]
                else:
                    dbdict[dbcol] = hdr[hdrkey]
        if hdr['TELESCOP'] not in telescopeids:
            logger.info('{} not recognized. Adding to telescopes table.'.format(hdr['TELESCOP']))
            lsc.mysqldef.insert_values(conn, 'telescopes', {'name': hdr['TELESCOP']})
            telescopes = lsc.mysqldef.query(['select id, name from telescopes'], conn)
            telescopeids = {tel['name']: tel['id'] for tel in telescopes}
        dbdict['telescopeid'] = telescopeids[hdr['TELESCOP']]
        if hdr['INSTRUME'] not in instrumentids:
            logger.info('{} not recognized. Adding to instruments table.'.format(hdr['INSTRUME']))
            lsc.mysqldef.insert_values(conn, 'instruments', {'name': hdr['INSTRUME']})
            instruments = lsc.mysqldef.query(['select id, name from instruments'], conn)
            instrumentids = {inst['name']: inst['id'] for inst in instruments}
        dbdict['instrumentid'] = instrumentids[hdr['INSTRUME']]
        if fileindb:
            lsc.mysqldef.query(["delete from " + table + " where filename='" + filename + "'"], conn)
        logger.info('ingesting {}'.format(filename))
        lsc.mysqldef.insert_values(conn, table, dbdict)
    else:
        dbdict = {}
        logger.info('{} already ingested'.format(filename))
    return dbdict
Example #29
def project_data_into_region(from_data_fn, to_region = "SC_241"):
    
    if to_region == "SC_241":
        to_region_fn = "/Volumes/DataDavy/GALFA/SC_241/LAB_corrected_coldens.fits"
        to_region_hdr = fits.getheader(to_region_fn)
    
    from_data_hdr = fits.getheader(from_data_fn)
    from_data_data = fits.getdata(from_data_fn)
     
    new_image, footprint = reproject_interp((from_data_data, from_data_hdr), to_region_hdr) 
    
    return new_image
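A sketch of saving the reprojected map onto the SC_241 grid; the input and output names are placeholders, and the target header path is the one hard-coded above:

from astropy.io import fits

new_image = project_data_into_region('my_coldens_map.fits')
to_region_hdr = fits.getheader("/Volumes/DataDavy/GALFA/SC_241/LAB_corrected_coldens.fits")
fits.writeto('my_map_on_SC241_grid.fits', new_image, to_region_hdr, overwrite=True)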
Example #30
def getJD(header, rootdir="./"):
    """
    Use the header of a co-added file to determine the julian date
    in the middle of the total observation
    """
    # First, get the filenames that were used
    fileList = [k for k in header.keys() if "FILE" in k]
    fileheader = fits.getheader(rootdir + header[fileList[0]])
    firstjd = fileheader['jd']
    fileheader = fits.getheader(rootdir + header[fileList[-1]])
    lastjd = fileheader['jd'] + fileheader['exptime'] / (24.0 * 3600.0)
    return (firstjd + lastjd) / 2.0
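A usage sketch, assuming a co-added frame whose header carries FILE* keywords pointing at the input frames (file names are placeholders):

from astropy.io import fits

hdr = fits.getheader('coadd.fits')       # placeholder co-added file
jd_mid = getJD(hdr, rootdir='raw/')      # JD at the middle of the observation
print(jd_mid)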
Example #31
def detect_sources(exp, sex_config, sex_io_dir, dual_exp=None, 
                   delete_created_files=True, label='hugs',
                   original_fn=None):
    """
    Source detection using SExtractor.

    Parameters
    ----------
    exp : lsst.afw.image.ExposureF
        Exposure object to run sextractor on.
    sex_config : dict
        SExtractor configuration parameters that differ from the defaults.
    sex_io_dir : str
        Input/output directory for files needed to run SExtractor.
    dual_exp : lsst.afw.image.ExposureF, optional
        Dual exposure for forced photometry. 
    delete_created_files : bool, optional
        If True, remove all files created for and by SExtractor.
    label : str
        Label for this run.

    Returns
    -------
    cat : astropy.table.Table
        SExtractor catalog.
    """

    sw = sextractor.Wrapper(sex_config, sex_io_dir)

    #########################################################
    # write exposure for sextractor input and run
    #########################################################

    detect_band = exp.getFilter().getName().lower()
    # some bands have numbers --> get the relevant letter
    detect_band = [b for b in detect_band if b in 'gri'][0] 
    exp_fn = sw.get_io_dir('exp-{}-{}.fits'.format(label, detect_band))

    # HACK: work around strange new bug related to SExtractor
    if original_fn is None:
        exp.writeFits(exp_fn)
    else:
        header = fits.getheader(original_fn, ext=1)
        fits.writeto(exp_fn, exp.getImage().getArray(), header, overwrite=True)

    if dual_exp is not None:
        meas_band = dual_exp.getFilter().getName().lower()
        meas_band = [b for b in meas_band if b in 'gri'][0]
        dual_fn = sw.get_io_dir('exp-{}-{}.fits'.format(label, meas_band))

        # HACK: work around strange new bug related to SExtractor
        if original_fn is None:
            dual_exp.writeFits(dual_fn)
        else:
            fn = original_fn.replace('HSC-'+detect_band.upper(), 
                                     'HSC-'+meas_band.upper())
            header = fits.getheader(fn, ext=1)
            fits.writeto(dual_fn, dual_exp.getImage().getArray(), header, 
                         overwrite=True)

        run_fn = exp_fn+'[1],'+dual_fn+'[1]'
        cat_label = 'sex-{}-{}-{}'.format(label, detect_band, meas_band)
    else:
        meas_band = detect_band
        cat_label = 'sex-{}-{}'.format(label, detect_band)
        run_fn = exp_fn+'[1]'

    cat_fn = sw.get_io_dir(cat_label+'.cat')

    #########################################################
    # run SExtractor and get catalog
    #########################################################

    sw.run(run_fn, cat_fn=cat_fn)
    cat = sextractor.read_cat(sw.get_io_dir(cat_fn))

    if len(cat)>0:

        #########################################################
        # only save positions from the primary detection band
        #########################################################

        detect_band_only = [
            'X_IMAGE', 'Y_IMAGE', 'ALPHA_J2000', 'DELTA_J2000', 'FLAGS', 
            'PETRO_RADIUS', 'THETA_IMAGE', 'A_IMAGE', 'B_IMAGE', 
            'ELLIPTICITY', 'KRON_RADIUS'
        ]

        ebv = dustmap.ebv(cat['ALPHA_J2000'], cat['DELTA_J2000'])

        if meas_band==detect_band:
            x0, y0 = exp.getXY0()
            cat['X_IMAGE'] -= 1
            cat['Y_IMAGE'] -= 1
            cat['X_HSC'] = cat['X_IMAGE'] + x0 
            cat['Y_HSC'] = cat['Y_IMAGE'] + y0 
            detect_band_only.append('X_HSC')
            detect_band_only.append('Y_HSC')
        else:
            cat.remove_columns(detect_band_only)

        #########################################################
        # rename columns, change units of flux_radius and 
        # fwhm_image to arcsec, add extinction params
        #########################################################

        cat.rename_column('MAG_APER', 'MAG_APER_0')
        cat.rename_column('MAGERR_APER', 'MAGERR_APER_0')
        cat.rename_column('FLUX_RADIUS', 'FLUX_RADIUS_0')

        for i, diam in enumerate(sex_config['PHOT_APERTURES'].split(',')):
            cat.rename_column('MAG_APER_'+str(i), 'mag_ap'+str(i))
            cat.rename_column('MAGERR_APER_'+str(i), 'magerr_ap'+str(i))
        
        for i, frac in enumerate(sex_config['PHOT_FLUXFRAC'].split(',')):
            frac = str(int(100 * float(frac)))
            cat.rename_column('FLUX_RADIUS_'+str(i), 'flux_radius_'+frac)
            cat['flux_radius_'+frac] *= utils.pixscale

        cat['FWHM_IMAGE'] = cat['FWHM_IMAGE']*utils.pixscale
        cat.rename_column('FWHM_IMAGE', 'FWHM')

        for name in cat.colnames:
            if name not in detect_band_only: 
                cat.rename_column(name, name.lower()+'_'+meas_band)
            else:
                cat.rename_column(name, name.lower())
        if meas_band==detect_band:
            cat.rename_column('alpha_j2000', 'ra')
            cat.rename_column('delta_j2000', 'dec')
            cat['ebv'] = ebv
        cat['A_'+meas_band] = ebv*getattr(utils.ext_coeff, meas_band)

    #########################################################
    # delete files created by and for sextractor
    #########################################################

    if delete_created_files:
        if dual_exp is not None:
            os.remove(dual_fn)
        os.remove(exp_fn)
        os.remove(cat_fn)

    return cat
Example #32
    mprocPool.close()

    return wav_chip, flux_conv_res


def save_new_data(fname, flux, hdr):
    fits.writeto(fname, flux, hdr, overwrite=True)


if __name__ == "__main__":

    filepaths = np.loadtxt("filelist.dat", dtype=str)

    wavelength_range = np.loadtxt("lines.rdb", skiprows=2)

    R = 50000

    for fname in filepaths:
        wav, flux = read_data(fname)
        hdr = fits.getheader(fname)

        wav_limits = [wav[0], wav[-1]]
        convolved_wav, convolved_flux = ip_convolution(wav,
                                                       flux,
                                                       wav_limits,
                                                       R,
                                                       fwhm_lim=5.0)

        newname = fname.replace(".fits", "") + "new_resolution.fits"
        save_new_data(newname, convolved_flux, hdr)
Example #33
    def __init__(self,
                 xarr,
                 grid_vwidth=1.0,
                 grid_vwidth_scale=False,
                 texgrid=None,
                 taugrid=None,
                 hdr=None,
                 path_to_texgrid='',
                 path_to_taugrid='',
                 temperature_gridnumber=3,
                 debug=False,
                 verbose=False,
                 modelfunc=None,
                 **kwargs):
        """
        Use a grid of RADEX-computed models to make a model line spectrum

        The RADEX models have to be available somewhere.
        OR they can be passed as arrays.  If as arrays, the form should be:
        texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))

        xarr must be a SpectroscopicAxis instance
        xoff_v, width are both in km/s.  Width is 'sigma', not FWHM.

        grid_vwidth is the velocity assumed when computing the grid in km/s
            this is important because tau = modeltau / width (see, e.g., 
            Draine 2011 textbook pgs 219-230)
        grid_vwidth_scale is True or False: False for LVG, True for Sphere


        A modelfunc must be specified.  Model functions should take an xarr and
        a series of keyword arguments corresponding to the line parameters
        (Tex, tau, xoff_v, and width (gaussian sigma, not FWHM))
        """

        self.modelfunc = modelfunc
        if self.modelfunc is None:
            raise ValueError(
                "Must specify a spectral model function.  See class help for form."
            )

        if texgrid is None and taugrid is None:
            if path_to_texgrid == '' or path_to_taugrid == '':
                raise IOError("Must specify model grids to use.")
            else:
                self.taugrid = [pyfits.getdata(path_to_taugrid)]
                self.texgrid = [pyfits.getdata(path_to_texgrid)]
                hdr = pyfits.getheader(path_to_taugrid)
                self.yinds, self.xinds = np.indices(self.taugrid[0].shape[1:])
                self.densityarr = ((self.xinds + hdr['CRPIX1'] - 1)
                                   * hdr['CD1_1'] + hdr['CRVAL1'])  # log density
                self.columnarr = ((self.yinds + hdr['CRPIX2'] - 1)
                                  * hdr['CD2_2'] + hdr['CRVAL2'])  # log column
                self.minfreq = (4.8, )
                self.maxfreq = (5.0, )
        elif len(taugrid) == len(texgrid) and hdr is not None:
            self.minfreq, self.maxfreq, self.texgrid = zip(*texgrid)
            self.minfreq, self.maxfreq, self.taugrid = zip(*taugrid)
            self.yinds, self.xinds = np.indices(self.taugrid[0].shape[1:])
            self.densityarr = ((self.xinds + hdr['CRPIX1'] - 1)
                               * hdr['CD1_1'] + hdr['CRVAL1'])  # log density
            self.columnarr = ((self.yinds + hdr['CRPIX2'] - 1)
                              * hdr['CD2_2'] + hdr['CRVAL2'])  # log column
        else:
            raise Exception

        # Convert X-units to frequency in GHz
        self.xarr = copy.copy(xarr)
        self.xarr.convert_to_unit('Hz', quiet=True)

        #tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
        #tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])

        if debug:
            import pdb
            pdb.set_trace()
Example #34
t = Table()

sector_folders = np.sort(glob.glob(args.Folder + 's00*/'))

for sector in sector_folders:
    sn = int(sector[-3:-1])
    color_print('\nSECTOR %d' % sn, 'cyan')

    for cam in range(1,5):
        for ccd in range(1,5):
            imgs = glob.glob(sector + 'tess*-%d-%d-*_ffic.fits' % (cam, ccd))

            for img in imgs:
                try:
                    hdr = fits.getheader(img, 1)
                    w   = WCS(hdr)

                    T = np.transpose([w.all_pix2world(i,0,0) for i in range(44,2092,10)])
                    R = np.transpose([w.all_pix2world(2091,i,0) for i in range(1,2048,10)])
                    B = np.transpose([w.all_pix2world(i,2047,0) for i in range(2091,43,-10)])
                    L = np.transpose([w.all_pix2world(44,i,0) for i in range(2046,0,-10)])

                    TOP    = SkyCoord(T[0], T[1], unit='deg').transform_to('barycentrictrueecliptic')
                    RIGHT  = SkyCoord(R[0], R[1], unit='deg').transform_to('barycentrictrueecliptic')
                    BOTTOM = SkyCoord(B[0], B[1], unit='deg').transform_to('barycentrictrueecliptic')
                    LEFT   = SkyCoord(L[0], L[1], unit='deg').transform_to('barycentrictrueecliptic')

                    color_print('CAM %d, CCD %d' % (cam, ccd), 'lightgreen')
                    print('\t', TOP[0].lon.degree, TOP[0].lat.degree)
                    print('\t', RIGHT[0].lon.degree, RIGHT[0].lat.degree)
Beispiel #35
0
def main(args):

    log = get_logger()

    nspec = 500  #- Hardcode!  Number of DESI fibers per spectrograph

    #- Sanity check that all spectra are represented
    fibers = set()
    for filename in args.files:
        x = fits.getdata(filename, 'FIBERMAP')
        fibers.update(set(x['FIBER']))

    if len(fibers) != nspec:
        msg = "Input files only have {} instead of {} spectra".format(
            len(fibers), nspec)
        if args.force:
            log.warning(msg)
        else:
            log.fatal(msg)
            sys.exit(1)

    #- Read a file to get basic dimensions
    w = fits.getdata(args.files[0], 'WAVELENGTH')
    nwave = len(w)
    R1 = fits.getdata(args.files[0], 'RESOLUTION')
    ndiag = R1.shape[1]
    hdr = fits.getheader(args.files[0])

    camera = hdr['CAMERA'].lower()  #- b0, r1, .. z9
    spectrograph = int(camera[1])
    fibermin = spectrograph * nspec

    #- Output arrays to fill
    flux = np.zeros((nspec, nwave))
    ivar = np.zeros((nspec, nwave))
    R = np.zeros((nspec, ndiag, nwave))
    fibermap = desispec.io.empty_fibermap(nspec, specmin=fibermin)
    mask = np.zeros((nspec, nwave), dtype=np.uint32)
    chi2pix = np.zeros((nspec, nwave))

    #- Fill them!
    for filename in args.files:
        fx = fits.open(filename)
        xhdr = fx[0].header
        xflux = fx['FLUX'].data
        xivar = fx['IVAR'].data
        xR = fx['RESOLUTION'].data
        xfibermap = fx['FIBERMAP'].data
        xmask = fx['MASK'].data
        xchi2pix = fx['CHI2PIX'].data
        fx.close()

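        # FIBER numbers are global across the instrument, so FIBER % nspec
        # (500 fibers per spectrograph) gives each spectrum's row in this
        # camera's output arrays; e.g. camera 'r1' covers fibers 500-999,
        # which land in rows 0-499.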
        ii = xfibermap['FIBER'] % nspec

        flux[ii] = xflux
        ivar[ii] = xivar
        R[ii] = xR
        fibermap[ii] = xfibermap
        mask[ii] = xmask
        chi2pix[ii] = xchi2pix

    #- Write it out
    print("Writing", args.output)
    frame = Frame(w,
                  flux,
                  ivar,
                  mask=mask,
                  resolution_data=R,
                  spectrograph=spectrograph,
                  meta=hdr,
                  fibermap=fibermap,
                  chi2pix=chi2pix)
    desispec.io.write_frame(args.output, frame)

    #- Scary!  Delete input files
    if args.delete:
        for filename in args.files:
            os.remove(filename)
Beispiel #36
0
def get_all_phoenix():

    all_sed = []

    # define the top directory
    SEDfile_dir = os.path.join(top_pysynphot_data_dir, dir_nostar,
                               dir_submodels[4])

    # loop over the metallicity subdirectories instead of repeating one
    # os.listdir block per directory
    subdirs = ['phoenixm00', 'phoenixm05', 'phoenixm10', 'phoenixm15',
               'phoenixm20', 'phoenixm25', 'phoenixm30', 'phoenixm35',
               'phoenixm40', 'phoenixp03', 'phoenixp05']

    filelist_group = []
    for subdir in subdirs:
        filenames = os.listdir(os.path.join(SEDfile_dir, subdir))
        filelist_group += [os.path.join(subdir, f) for f in filenames
                           if f.endswith('.fits')]

    fits_files = filelist_group

    obj_headers = []
    obj_files = []
    for filename in fits_files:
        fullfilename = os.path.join(SEDfile_dir, filename)
        hdr = fits.getheader(fullfilename)
        obj_headers.append(hdr)
        obj_files.append(filename)

    obj_temperatures = []
    obj_log_z_all = []
    for hdr in obj_headers:
        obj_temperatures.append(float(hdr['TEFF']))
        obj_log_z_all.append(float(hdr['LOG_Z']))

    obj_names2 = []
    for thefile in fits_files:
        #thenames=re.findall('^bk_([a-z][0-9]+).fits$',thefile)
        thenames = re.findall('([a-z].+_[0-9].+).fits$', thefile)
        if len(thenames) > 0:
            obj_names2.append(thenames[0])
        else:
            print('bad file ', thefile)

    obj_names = obj_names2
    obj_files = filelist_group

    #objames_and_objfiles = zip(obj_names, obj_files)
    #objames_and_objtemp = zip(obj_names, obj_temperatures)
    objtemp_and_objlogz = zip(obj_temperatures, obj_log_z_all)

    #all_logg=np.array([0.0,0.5,1.,1.5,2.,2.5,3.,3.5,4.,4.5])
    #all_logg=np.array([0.0,1.,2.,3.,4.])
    all_logg = np.array([0.0])

    index = 0
    for temp, logz in objtemp_and_objlogz:
        if index % 100 == 0:
            print('phoenix star : T=', temp, ' metal=', logz)
        for logg in all_logg:
            #Icat(model,temp,logz,logg)
            sed = S.Icat('phoenix', temp, logz, logg)
            sed.convert('flam')  # to be sure every spectrum is in flam unit
            all_sed.append(sed)
            index += 1

    return all_sed
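# Minimal standalone sketch (assumes pysynphot and its CDBS data tree are
# configured) of the Icat lookup done in the loop above: one Phoenix
# spectrum at Teff=5750 K, log Z=0.0, log g=4.5, converted to flam.
import pysynphot as S

sed = S.Icat('phoenix', 5750.0, 0.0, 4.5)
sed.convert('flam')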
Beispiel #37
0
    'UTSTTIME', 'UTNDTIME', 'RA', 'DEC', 'PAP', 'PAD', 'IMRA', 'AIRMASS'
])
allRows = []
displayCount = 0
displayInt = 10
# Go through files and extract headers
for filename in allFilenames:

    if displayCount == displayInt:
        print('Extracting header from ' + filename)
        displayCount = 0
    else:
        displayCount = displayCount + 1

    try:
        hdr = fits.getheader(filename, 1)
    except Exception:
        print("File ignored, couldn't load FITS header")
        continue

    try:
        UTSTTIME = hdr['UTSTTIME']
        UTNDTIME = hdr['UTNDTIME']
        RA = hdr['RA']
        DEC = hdr['DEC']
        PAP = hdr['PAP']
        PAD = hdr['PAD']
        IMRA = hdr['IMRA']
        AIRMASS = hdr['AIRMASS']
    except KeyError:
        print("File ignored, couldn't find all keywords.")
Beispiel #38
0
def separate_period(base_dir):
    """Separate observations in the base dir into needed folders.

    Parameters
    ----------
    base_dir : str
        directory containing darks and biases to be split.

    """


    print('Separating', base_dir)
    all_files = glob.glob(os.path.join(base_dir, 'o*_raw.fits'))
    if not len(all_files):
        print("nothing to move")
        return

    mjd_times = np.array([fits.getval(item, 'EXPSTART', ext=1)
                          for item in all_files])
    month_begin = mjd_times.min()
    month_end = mjd_times.max()
    print('All data goes from', month_begin, ' to ',  month_end)

    select_gain = {'WK' : 1,
                   'BIWK' : 4}

    for file_type, mode in zip(['BIAS', 'DARK', 'BIAS'],
                               ['WK', 'WK', 'BIWK']):

        gain = select_gain[mode]

        obs_list = []
        for item in all_files:
            with fits.open(item) as hdu:
                if (hdu[0].header['TARGNAME'] == file_type) and (hdu[0].header['CCDGAIN'] == gain):
                    obs_list.append(item)

        if not len(obs_list):
            print('{} No obs to move.  Skipping'.format(mode))
            continue
        else:
            print(file_type,  mode, len(obs_list), 'files to move, ', 'gain = ', gain)

        N_days = int(month_end - month_begin)
        N_periods = figure_number_of_periods(N_days, mode)
        week_lengths = functions.figure_days_in_period(N_periods, N_days)

        #--Add remainder to end
        week_lengths[-1] += (month_end - month_begin) - N_days

        #-- Translate to MJD
        anneal_weeks = []
        start = month_begin
        end = start + week_lengths[0]
        anneal_weeks.append((start, end))
        for item in week_lengths[1:]:
            start = end
            end += item
            anneal_weeks.append((start, end))

        print()
        print(file_type, mode, 'will be broken up into %d periods as follows:'%(N_periods))
        print('\tWeek start, Week end')
        for a_week in anneal_weeks:
            print('\t', a_week)
        print()

        for period in range(N_periods):
            begin, end = anneal_weeks[period]
            # weeks from 1-4, not 0-3
            week = str(period + 1).zfill(2)

            output_path = base_dir
            if file_type == 'BIAS':
                output_path = os.path.join(output_path,
                                           'biases/%d-1x1/%s%s/'%(gain,
                                                                  mode.lower(),
                                                                  week))
            elif file_type == 'DARK':
                output_path = os.path.join(output_path,
                                           'darks/%s%s/'%(mode.lower(), week))
            else:
                print('File Type not recognized')

            print(output_path)
            if not os.path.exists(output_path):
                os.makedirs(output_path)

            print('week goes from: ', begin, end)
            obs_to_move = [item for item in obs_list if
                           (begin <= fits.getval(item, 'EXPSTART', ext=1) <= end)]

            if not len(obs_to_move):
                raise ValueError('error, empty list to move')

            for item in obs_to_move:
                print('Moving ', item,  ' to:', output_path)
                shutil.move(item,  output_path)
                if 'IMPHTTAB' not in fits.getheader(os.path.join(output_path,
                                                                 item.split('/')[-1]), 0):
                    ###Dynamic at some point
                    fits.setval(os.path.join(output_path, item.split('/')[-1]),
                                'IMPHTTAB',
                                ext=0,
                                value='oref$x9r1607mo_imp.fits')

                obs_list.remove(item)
                all_files.remove(item)
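# Worked illustration (made-up numbers) of the MJD window construction in
# separate_period above: week_lengths of [7, 7, 7.3] days starting at
# month_begin = 58000.0 yield anneal_weeks of
# [(58000.0, 58007.0), (58007.0, 58014.0), (58014.0, 58021.3)].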
Beispiel #39
0
def match_IPS_keywords(stsci_pipe_ready_file, ips_file, cmd_line_args=None):
    """
    This function performs the change of keyword values for the STScI pipeline-ready file to match the values in the
    IPS file.
    :param stsci_pipe_ready_file: string, path and name of the STScI pipeline-ready file
    :param ips_file: string, path and name of the IPS file
    :param cmd_line_args: dictionary, keywords and corresponding values indicated in the command line
    :return: nothing; the input fits file with the modified keyword values
    """
    # get the headers from the IPS file
    primary_ext_ips_keywd_dict = fits.getheader(ips_file, 0)
    header_ext_ips_keywd_dict = fits.getheader(ips_file, extname='header')

    # get the header from the STScI pipeline-ready file
    st_pipe_ready_dict = fits.getheader(stsci_pipe_ready_file, 0)

    # iterate over the map of STScI to IPS keywords dictionary (defined in this script)
    for key2modify, val2modify in stsci2ips_dict.items():
        if val2modify == 'N/A':
            # for a non applicable (not too important) keyword for the simulations, set value to N/A
            st_pipe_ready_dict[key2modify] = 'N/A'

        if 'primary_ext' in val2modify:
            # look for the same keyword in IPS file in the primary extension header
            ips_key = val2modify.split(':')[-1]
            if 'SUBARRAY' in key2modify:
                set_subarray_and_size_keywds(primary_ext_ips_keywd_dict,
                                             st_pipe_ready_dict,
                                             stsci_pipe_ready_file)
            else:
                change_keyword2ips_value(primary_ext_ips_keywd_dict,
                                         st_pipe_ready_dict, ips_key,
                                         key2modify, stsci_pipe_ready_file)

        if 'header_ext' in val2modify:
            # look for the same keyword in IPS file in the header extension
            ips_key = val2modify.split(':')[-1]
            change_keyword2ips_value(header_ext_ips_keywd_dict,
                                     st_pipe_ready_dict, ips_key, key2modify,
                                     stsci_pipe_ready_file)

        if 'set_to_given_string' in val2modify:
            # change the keyword value to that given in the command line - this is optional
            if cmd_line_args is None:
                continue
            else:
                if key2modify in cmd_line_args:
                    print('Modified keyword: ', key2modify, '   old_value=',
                          st_pipe_ready_dict[key2modify], '   new_value=',
                          cmd_line_args[key2modify])
                    fits.setval(stsci_pipe_ready_file,
                                key2modify,
                                value=cmd_line_args[key2modify])
                else:
                    print('Value for keyword=', key2modify,
                          ' not provided with line command.')
                    continue

        if 'calculation' in val2modify:
            print('Value for keyword ', key2modify, ' will be calculated...')
            if key2modify == 'EXPSTART':
                # put the dates/times values into the time stamp format to do operations
                dateobs_string = st_pipe_ready_dict['DATE-OBS'].replace(
                    'T', ' ')
                dateobs = datetime.timestamp(
                    datetime.fromisoformat(dateobs_string))
                timeobs_string = st_pipe_ready_dict['TIME-OBS']
                timeobs_string = dateobs_string + ' ' + timeobs_string
                timeobs = datetime.timestamp(
                    datetime.fromisoformat(timeobs_string))
                visitstart_string = st_pipe_ready_dict['VSTSTART'].replace(
                    'T', ' ')
                visitstart = datetime.timestamp(
                    datetime.fromisoformat(visitstart_string))
                # this the calculation follows the JWST keyword dictionary calculation:
                # https://mast.stsci.edu/portal/Mashup/Clients/jwkeywords/
                # expstart = input('DATE-OBS') + 'T' + input('TIME-OBS') / UTC exposure start time (MJD)
                # However, we are using the visit start time instead of the last argument since we do not have an
                # actual exposure start time, and because the exposures are 1 in this case
                new_val = dateobs + timeobs / visitstart
                # change value in dictionary for use within the script
                st_pipe_ready_dict['EXPSTART'] = new_val

            if key2modify == 'DURATION':
                # this the calculation follows the JWST keyword dictionary calculation:
                # https://mast.stsci.edu/portal/Mashup/Clients/jwkeywords/
                # duration = TFRAME*((NGROUPS*NFRAMES+(NGROUPS-1)*GROUPGAP+DROPFRAMES1*NINTS)) where DROPFRAMES1 is
                # a lookup in the PRD DataModes table.
                # However, NIRSpec only drops frames in TA, hence the calculation simplifies to:
                # duration = TFRAME*((NGROUPS*NFRAMES))
                new_val = st_pipe_ready_dict['TFRAME'] * st_pipe_ready_dict[
                    'NGROUPS'] * st_pipe_ready_dict['NFRAMES']
                # change value in dictionary for use within the script
                st_pipe_ready_dict['DURATION'] = new_val

            if key2modify == 'EXPEND':
                new_val = st_pipe_ready_dict['EXPSTART'] + st_pipe_ready_dict[
                    'DURATION']
                # change value in dictionary for use within the script
                st_pipe_ready_dict['EXPEND'] = new_val

            if key2modify == 'EXPMID':
                new_val = st_pipe_ready_dict['EXPSTART'] + (
                    st_pipe_ready_dict['DURATION'] / 2.0)
                # change value in dictionary for use within the script
                st_pipe_ready_dict['EXPMID'] = new_val

            if key2modify == 'TSAMPLE':
                # this the calculation follows the JWST keyword dictionary calculation:
                # https://mast.stsci.edu/portal/Mashup/Clients/jwkeywords/
                # tsample = readout pattern lookup
                # table taken from:
                # https://jwst-docs.stsci.edu/near-infrared-spectrograph/nirspec-instrumentation/nirspec-detectors/nirspec-detector-readout-modes-and-patterns
                readout_patterns = {
                    'NRSRAPID': 10.737,  # frames=1
                    'NRSRAPIDD1': 21.474,  # frames=1
                    'NRSRAPIDD2': 32.210,  # frames=1
                    'NRSRAPIDD6': 75.159,  # frames=1
                    'NRS': 42.947,  # frames=4
                    'NRSIRS2RAPID': 14.589,  # frames=1
                    'NRSIRS2': 72.944,  # frames=5
                }
                new_val = readout_patterns[st_pipe_ready_dict['READPATT']]
                # change value in dictionary for use within the script
                st_pipe_ready_dict['TSAMPLE'] = new_val

            if key2modify == 'TGROUP':
                # this the calculation follows the JWST keyword dictionary calculation:
                # https://mast.stsci.edu/portal/Mashup/Clients/jwkeywords/
                # tgroup = (GROUPGAP+NFRAMES)*TFRAME
                # However, NIRSpec only drops frames in TA, hence the calculation simplifies to:
                # tgroup = NFRAMES*TFRAME
                # reference for GROUPGAP: http://www.stsci.edu/~tumlinso/nirspec_ocd_v6_DRAFT.pdf
                new_val = st_pipe_ready_dict['NFRAMES'] * st_pipe_ready_dict[
                    'TFRAME']
                # change value in dictionary for use within the script
                st_pipe_ready_dict['TGROUP'] = new_val

            print('Modified keyword: ', key2modify, '   old_value=',
                  st_pipe_ready_dict[key2modify], '   new_value=', new_val)
            print('    * WARNING: This calculation needs to be verified ')
            fits.setval(stsci_pipe_ready_file, key2modify, value=new_val)

        elif 'specific_string' in val2modify:
            # now set all the other keyword whose string value will not change from simulation from simulation, this
            # is the case for the 'specific_string' in the map of STScI to IPS keywords dictionary
            new_val = val2modify.split(':')[-1]
            print('Modified keyword: ', key2modify, '   old_value=',
                  st_pipe_ready_dict[key2modify], '   new_value=', new_val)
            fits.setval(stsci_pipe_ready_file, key2modify, value=new_val)
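# Illustrative (hypothetical) entries for the stsci2ips_dict mapping that
# the loop above interprets; the real dictionary is defined elsewhere in
# this script:
#
#   stsci2ips_dict = {
#       'TELESCOP': 'specific_string:JWST',   # fixed string value
#       'DETECTOR': 'primary_ext:DET',        # copied from IPS primary header
#       'GWA_XTIL': 'header_ext:GWA_XTIL',    # copied from IPS 'header' extension
#       'TARGNAME': 'set_to_given_string',    # taken from the command line
#       'EXPSTART': 'calculation',            # computed in the code above
#   }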
Beispiel #40
0
def which_band_is_file(filename):
    """This resuts the band of the given file if it is a
    stella one"""
    if not is_stella_file(filename):
        return None
    return pf.getheader(filename, ext=1).get("FILTER")
Beispiel #41
0
def get_offsets(path0, mylogger=None, silent=True, verbose=False):
    '''
    Function to get offsets from FITS header and write it to ASCII file

    Parameters
    ----------
    path0 : str
     Path to FITS file. Must include '/' at the end

    silent : boolean
      Turns off stdout messages. Default: True

    verbose : boolean
      Turns on additional stdout messages. Default: False

    Returns
    -------
    tab0 : astropy.table.Table
     Astropy ASCII table written to [path0]+'sci_offsets.tbl'

    Notes
    -----
    Created by Chun Ly, 30 May 2017
    Modified by Chun Ly, 10 December 2017
     - Implement glog logging, allow mylogger keyword input
    Modified by Chun Ly, 17 December 2017
     - Minor fix: log -> clog
     - Call dir_check with mylogger input
    '''

    # + on 10/12/2017
    if mylogger is None:
        mylog, clog = 0, log
    else:
        mylog, clog = 1, mylogger

    if silent == False: clog.info('### Begin get_offsets : ' + systime())

    # Mod on 17/12/2017
    if not mylog:
        dir_list, list_path = dir_check.main(path0,
                                             silent=silent,
                                             verbose=verbose)
    else:
        dir_list, list_path = dir_check.main(path0,
                                             silent=silent,
                                             verbose=verbose,
                                             mylogger=clog)

    for path in list_path:
        outfile = path + 'sci_offsets.tbl'

        if exists(outfile):
            # Mod on 10/12/2017
            clog.warning('File exists : ' + outfile)
            clog.warning('Not over-writing!!! ')
        else:
            fits_files = np.loadtxt(path + 'obj.lis', dtype=type(str))
            fits_files = [path + file0 for file0 in fits_files]  # Bug fix
            n_files = len(fits_files)

            names0 = ('filename', 'xoffset', 'yoffset', 'poffset', 'qoffset')
            dtype0 = ('S20', 'f8', 'f8', 'f8', 'f8')
            tab0 = Table(names=names0, dtype=dtype0)

            for nn in range(n_files):
                basename = os.path.basename(fits_files[nn])
                if verbose == True: log.info('## Reading : ' + basename)
                h0 = fits.getheader(fits_files[nn])
                vec0 = [
                    basename, h0['XOFFSET'], h0['YOFFSET'], h0['POFFSET'],
                    h0['QOFFSET']
                ]
                tab0.add_row(vec0)

            if silent == False: clog.info('Writing : ' + outfile)
            asc.write(tab0, outfile, format='fixed_width_two_line')
        #endelse
    #endfor

    if silent == False: clog.info('### End get_offsets : ' + systime())
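# For reference, format='fixed_width_two_line' writes a plain-text table
# like the following (hypothetical values):
#
#    filename       xoffset yoffset poffset qoffset
#   -------------- ------- ------- ------- -------
#   N20170530S0101     1.5    -2.0     1.5    -2.0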
Beispiel #42
0
def align_to_reference(ROOT_DIRECT, ALIGN_IMAGE, fitgeometry="shift",
    clean=True, verbose=False, ALIGN_EXTENSION=0, toler=3, skip_swarp=False,
    align_sdss_ds9=False, catalog=None):
    """
xshift, yshift, rot, scale, xrms, yrms = align_to_reference()
    """        
    import os
    import glob
    import shutil
    
    from pyraf import iraf
    from iraf import stsdas,dither
    
    import threedhst
    from threedhst import catIO
    
    no = iraf.no
    yes = iraf.yes
    INDEF = iraf.INDEF
    
    #### Clean slate    
    rmfiles = ['SCI.fits','WHT.fits','align.cat','direct.cat',
               'align.map','align.match','align.reg','align.xy',
               'direct.reg','direct.xy','ds9_align.tsv']
    
    for file in rmfiles:
        try:
            os.remove(file)
        except:
            pass
    
    if catalog is not None: 
        align_sdss_ds9 = True
                    
    #### Get only images that overlap from the ALIGN_IMAGE list    
    if not align_sdss_ds9:
        align_img_list = find_align_images_that_overlap(ROOT_DIRECT+'_drz.fits', ALIGN_IMAGE, ALIGN_EXTENSION=ALIGN_EXTENSION)
        if not align_img_list:
            print('threedhst.shifts.align_to_reference: no alignment images overlap.')
            return 0,0
    
    #### Use swarp to combine the alignment images to the same image 
    #### dimensions as the direct mosaic
    if (not skip_swarp) & (not align_sdss_ds9):
        try:
            os.remove(ROOT_DIRECT+'_align.fits')
        except:
            pass
        matchImagePixels(input=align_img_list,
                     matchImage=ROOT_DIRECT+'_drz.fits',
                     output=ROOT_DIRECT+'_align.fits', match_extension = 1,
                     input_extension=ALIGN_EXTENSION)
                     
    #### Run SExtractor on the direct image, with the WHT 
    #### extension as a weight image
    se = threedhst.sex.SExtractor()
    se.aXeParams()
    se.copyConvFile()
    se.overwrite = True
    se.options['CHECKIMAGE_TYPE'] = 'NONE'
    se.options['WEIGHT_TYPE']     = 'MAP_WEIGHT'
    se.options['WEIGHT_IMAGE']    = 'WHT.fits'
    se.options['FILTER']    = 'Y'
    ## Detect thresholds (default = 1.5)
    THRESH = 10
    if align_sdss_ds9:
        if 'Vizier' not in REFERENCE_CATALOG:
            THRESH = 20
            
    se.options['DETECT_THRESH']    = '%d' %(THRESH)
    se.options['ANALYSIS_THRESH']  = '%d' %(THRESH)
    se.options['MAG_ZEROPOINT'] = str(threedhst.options['MAG_ZEROPOINT'])

    #### Run SExtractor on direct and alignment images
    ## direct image
    se.options['CATALOG_NAME']    = 'direct.cat'
    iraf.imcopy(ROOT_DIRECT+'_drz.fits[1]',"SCI.fits")
    iraf.imcopy(ROOT_DIRECT+'_drz.fits[2]',"WHT.fits")
    status = se.sextractImage('SCI.fits')

    ## Read the catalog
    directCat = threedhst.sex.mySexCat('direct.cat')

    if align_sdss_ds9:
        ### Use ds9 SDSS catalog to refine alignment
        import threedhst.dq
        import pywcs
        import threedhst.catIO as catIO
        
        wcs = pywcs.WCS(pyfits.getheader('SCI.fits', 0))
        #wcs = pywcs.WCS(pyfits.getheader('Q0821+3107-F140W_drz.fits', 1))
        
        if 'Vizier' in REFERENCE_CATALOG:
            #### Use (unstable) astroquery Vizier search
            #### CFHTLS-Deep: 'Vizier.II/317'
            VIZIER_CAT = REFERENCE_CATALOG.split('Vizier.')[1]
            print('Align to Vizier catalog: http://vizier.u-strasbg.fr/viz-bin/VizieR?-source=%s' %(VIZIER_CAT))
            
            import astroquery
            if astroquery.__version__ < '0.0.dev1078':
                from astroquery import vizier

                query = {}
                query["-source"] = VIZIER_CAT
                #query["-out"] = ["_r", "CFHTLS", "rmag"]
                query["-out"] = ["_RAJ2000", "_DEJ2000"]  ### Just RA/Dec.

                #### Center position and query radius
                r0, d0 = wcs.wcs_pix2sky([[wcs.naxis1/2., wcs.naxis2/2.]], 1)[0]
                rll, dll = wcs.wcs_pix2sky([[0, 0]], 1)[0]
                corner_radius = np.sqrt((r0-rll)**2*np.cos(d0/360.*2*np.pi)**2+(d0-dll)**2)*60.*1.5
                h = query["-c"] = "%.6f %.6f" %(r0, d0)
                query["-c.rm"] = "%.3f" %(corner_radius)  ### xxx check image size

                #### Run the query
                vt = vizier.vizquery(query)
            else:
                #### Newer astroquery
                from astroquery.vizier import Vizier
                import astropy.coordinates as coord
                import astropy.units as u
                
                Vizier.ROW_LIMIT = -1
                
                r0, d0 = wcs.wcs_pix2sky([[wcs.naxis1/2., wcs.naxis2/2.]], 1)[0]
                rll, dll = wcs.wcs_pix2sky([[0, 0]], 1)[0]
                corner_radius = np.sqrt((r0-rll)**2*np.cos(d0/360.*2*np.pi)**2+(d0-dll)**2)*60.*1.5
                #
                c = coord.ICRSCoordinates(ra=r0, dec=d0, unit=(u.deg, u.deg))
                #### something with astropy.coordinates
                # c.icrs.ra.degree = c.icrs.ra.degrees
                # c.icrs.dec.degree = c.icrs.dec.degrees
                #
                vt = Vizier.query_region(c, width=u.Quantity(corner_radius, u.arcminute), catalog=[VIZIER_CAT])[0]
                
            #### Make a region file
            ra_list, dec_list = vt['RAJ2000'], vt['DEJ2000']
            print('Vizier, found %d objects.' %(len(ra_list)))
            fp = open('%s.vizier.reg' %(ROOT_DIRECT),'w')
            fp.write('# %s, r=%.1f\'\nfk5\n' %(VIZIER_CAT, corner_radius))
            for ra, dec in zip(ra_list, dec_list):
                fp.write('circle(%.6f, %.6f, 0.5")\n' %(ra, dec))
            #
            fp.close()
        else:
            #### Use DS9 catalog
            ds9 = threedhst.dq.myDS9()
            ds9.set('file SCI.fits')
            #ds9.set('file Q0821+3107-F140W_drz.fits')
            ds9.set('catalog %s' %(REFERENCE_CATALOG))
            ### Can't find XPA access point for "copy to regions"
            ds9.set('catalog export tsv ds9_align.tsv')
            lines = open('ds9_align.tsv').readlines()
            ra_list, dec_list = [], []
            for line in lines[1:]:
                spl = line.split()
                ra, dec = float(spl[0]), float(spl[1])
                ra_list.append(ra)
                dec_list.append(dec)
            #
            del(ds9)
            
        x_image, y_image = [], []
        for ra, dec in zip(ra_list, dec_list):
            x, y = wcs.wcs_sky2pix([[ra, dec]], 1)[0]
            x_image.append(x)
            y_image.append(y)
        
        alignCat = catIO.EmptyCat()
        alignCat['X_IMAGE'] = np.array(x_image)
        alignCat['Y_IMAGE'] = np.array(y_image)
        
    else:
        ## alignment image
        se.options['CATALOG_NAME']    = 'align.cat'
        status = se.sextractImage(ROOT_DIRECT+'_align.fits')
        alignCat = threedhst.sex.mySexCat('align.cat')
    
    xshift = 0
    yshift = 0
    rot = 0
    scale = 1.
    
    xrms = 2
    yrms = 2
    
    NITER = 5
    IT = 0
    while (IT < NITER):
        IT = IT+1
        
        #### Get x,y coordinates of detected objects
        ## direct image
        fp = open('direct.xy','w')
        for i in range(len(directCat.X_IMAGE)):
            fp.write('%s  %s\n' %(directCat.X_IMAGE[i],directCat.Y_IMAGE[i]))
        fp.close()

        ## alignment image
        fp = open('align.xy','w')
        for i in range(len(alignCat.X_IMAGE)):
            fp.write('%s  %s\n' %(float(alignCat.X_IMAGE[i])+xshift,
                       float(alignCat.Y_IMAGE[i])+yshift))
        fp.close()

        iraf.flpr()
        iraf.flpr()
        iraf.flpr()
        #### iraf.xyxymatch to find matches between the two catalogs
        pow = toler*1.
        try:
            os.remove('align.match')
        except:
            pass
            
        status1 = iraf.xyxymatch(input="direct.xy", reference="align.xy",
                       output="align.match",
                       tolerance=2**pow, separation=0, verbose=yes, Stdout=1)
        
        nmatch = 0
        while status1[-1].startswith('0') | (nmatch < 10):
            pow+=1
            os.remove('align.match')
            status1 = iraf.xyxymatch(input="direct.xy", reference="align.xy",
                           output="align.match",
                           tolerance=2**pow, separation=0, verbose=yes, Stdout=1)
            #
            nmatch = 0
            for line in open('align.match'): nmatch += 1
            
        if verbose:
            for line in status1:
                print(line)
        
                
        #### Compute shifts with iraf.geomap
        iraf.flpr()
        iraf.flpr()
        iraf.flpr()
        try:
            os.remove("align.map")
        except:
            pass
            
        status2 = iraf.geomap(input="align.match", database="align.map",
                    fitgeometry=fitgeometry, interactive=no, 
                    xmin=INDEF, xmax=INDEF, ymin=INDEF, ymax=INDEF,
                    maxiter = 10, reject = 2.0, Stdout=1)
        if verbose:
            for line in status2:
                print(line)
        
        #fp = open(root+'.iraf.log','a')
        #fp.writelines(status1)
        #fp.writelines(status2)
        #fp.close()
                
        #### Parse geomap.output 
        fp = open("align.map","r")
        for line in fp.readlines():
            spl = line.split()
            if spl[0].startswith('xshift'):
                xshift += float(spl[1])    
            if spl[0].startswith('yshift'):
                yshift += float(spl[1])    
            if spl[0].startswith('xrotation'):
                rot = float(spl[1])    
            if spl[0].startswith('xmag'):
                scale = float(spl[1])    
            if spl[0].startswith('xrms'):
                xrms = float(spl[1])    
            if spl[0].startswith('yrms'):
                yrms = float(spl[1])    
            
        fp.close()
        
        #os.system('wc align.match')
        print('Shift iteration #%d, xshift=%f, yshift=%f, rot=%f, scl=%f (rms: %5.2f,%5.2f)' %(IT, xshift, yshift, rot, scale, xrms, yrms))
    
    im = pyfits.open('SCI.fits')
        
    shutil.copy('align.map',ROOT_DIRECT+'_align.map')
    shutil.copy('align.match',ROOT_DIRECT+'_align.match')
    
    #### Cleanup
    if clean:
        rmfiles = ['SCI.fits','WHT.fits','align.cat',
               'align.map','align.match','align.reg','align.xy',
               'direct.cat','direct.reg','direct.xy',
               'drz_sci.fits','drz_wht.fits','bg.fits']
        
        for file in rmfiles:
            try:
                os.remove(file)
            except:
                pass
        
    return xshift, yshift, rot, scale, xrms, yrms
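# For reference, the align.map parsing above expects geomap database lines
# of the form "<key> <value>"; a hypothetical excerpt:
#
#   xshift      1.234
#   yshift     -0.567
#   xrotation   0.010
#   xmag        1.0002
#   xrms        0.35
#   yrms        0.41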
Beispiel #43
0
def formaldehyde_mm_radex(xarr,
                          temperature=25,
                          column=13,
                          density=4,
                          xoff_v=0.0,
                          width=1.0,
                          grid_vwidth=1.0,
                          texgrid=None,
                          taugrid=None,
                          hdr=None,
                          path_to_texgrid='',
                          path_to_taugrid='',
                          debug=False,
                          verbose=False,
                          **kwargs):
    """
    Use a grid of RADEX-computed models to make a model line spectrum

    The RADEX models have to be available somewhere.
    OR they can be passed as arrays.  If as arrays, the form should be:
    texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))

    xarr must be a SpectroscopicAxis instance
    xoff_v, width are both in km/s


    Parameters
    ----------
    grid_vwidth : float
        the velocity assumed when computing the grid in km/s
        this is important because tau = modeltau / width (see, e.g., 
        Draine 2011 textbook pgs 219-230)
    density : float
        Density!
    """

    if texgrid is None and taugrid is None:
        if path_to_texgrid == '' or path_to_taugrid == '':
            raise IOError("Must specify model grids to use.")
        else:
            taugrid = [pyfits.getdata(path_to_taugrid)]
            texgrid = [pyfits.getdata(path_to_texgrid)]
            hdr = pyfits.getheader(path_to_taugrid)
            zinds, yinds, xinds = np.indices(taugrid[0].shape)
            if 'CD1_1' in hdr:
                cd11 = 'CD1_1'
                cd22 = 'CD2_2'
            else:
                cd11 = 'CDELT1'
                cd22 = 'CDELT2'
            densityarr = (xinds + hdr['CRPIX1'] -
                          1) * hdr[cd11] + hdr['CRVAL1']  # log density
            columnarr = (yinds + hdr['CRPIX2'] -
                         1) * hdr[cd22] + hdr['CRVAL2']  # log column
            temparr = (zinds + hdr['CRPIX3'] -
                       1) * hdr['CDELT3'] + hdr['CRVAL3']  # lin temperature
            minfreq = (218., )
            maxfreq = (219., )
    elif len(taugrid) == len(texgrid) and hdr is not None:
        minfreq, maxfreq, texgrid = zip(*texgrid)
        minfreq, maxfreq, taugrid = zip(*taugrid)
        zinds, yinds, xinds = np.indices(taugrid[0].shape)
        if 'CD1_1' in hdr:
            cd11 = 'CD1_1'
            cd22 = 'CD2_2'
        else:
            cd11 = 'CDELT1'
            cd22 = 'CDELT2'
        densityarr = (xinds + hdr['CRPIX1'] -
                      1) * hdr[cd11] + hdr['CRVAL1']  # log density
        columnarr = (yinds + hdr['CRPIX2'] -
                     1) * hdr[cd22] + hdr['CRVAL2']  # log column
        temparr = (zinds + hdr['CRPIX3'] -
                   1) * hdr['CDELT3'] + hdr['CRVAL3']  # lin temperature
    else:
        raise Exception

    # Convert X-units to frequency in Hz
    xarr = xarr.as_unit('Hz', quiet=True)

    #tau_nu_cumul = np.zeros(len(xarr))

    gridval1 = np.interp(density, densityarr[0, 0, :], xinds[0, 0, :])
    gridval2 = np.interp(column, columnarr[0, :, 0], yinds[0, :, 0])
    gridval3 = np.interp(temperature, temparr[:, 0, 0], zinds[:, 0, 0])
    if np.isnan(gridval1) or np.isnan(gridval2) or np.isnan(gridval3):
        raise ValueError("Invalid column/density")

    if scipyOK:
        # this is mostly a trick for speed: slice so you only have two thin layers to interpolate
        # between
        #slices = [density_gridnumber] + [slice(np.floor(gv),np.floor(gv)+2) for gv in (gridval2,gridval1)]
        slices = (
            slice(int(np.floor(gridval3)), int(np.floor(gridval3)) + 2),
            slice(int(np.floor(gridval2)), int(np.floor(gridval2)) + 2),
            slice(int(np.floor(gridval1)), int(np.floor(gridval1)) + 2),
        )
        tau = [
            scipy.ndimage.map_coordinates(tg[slices],
                                          np.array([[gridval3 % 1],
                                                    [gridval2 % 1],
                                                    [gridval1 % 1]]),
                                          order=1) for tg in taugrid
        ]
        tex = [
            scipy.ndimage.map_coordinates(tg[slices],
                                          np.array([[gridval3 % 1],
                                                    [gridval2 % 1],
                                                    [gridval1 % 1]]),
                                          order=1) for tg in texgrid
        ]
    else:
        raise ImportError(
            "Couldn't import scipy, therefore cannot interpolate")
    #tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
    #tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])

    if verbose:
        for ta, tk in zip(tau, tex):
            print(
                "density %20.12g temperature %20.12g column %20.12g: tau %20.12g tex %20.12g"
                % (density, temperature, column, ta, tk))

    if debug:
        import pdb
        pdb.set_trace()

    spec = np.sum([(formaldehyde_mm_vtau(xarr,
                                         Tex=float(tex[ii]),
                                         tau=float(tau[ii]),
                                         xoff_v=xoff_v,
                                         width=width,
                                         **kwargs) *
                    (xarr.as_unit('GHz').value > minfreq[ii]) *
                    (xarr.as_unit('GHz').value < maxfreq[ii]))
                   for ii in range(len(tex))],
                  axis=0)

    return spec
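# Minimal illustration of the map_coordinates trick used above: with
# order=1, evaluating at a fractional index is a trilinear interpolation,
# so pre-slicing the two bracketing planes only shrinks the array that has
# to be traversed. Toy grid, single lookup point:
import numpy as np
import scipy.ndimage

grid = np.arange(24, dtype=float).reshape(2, 3, 4)
val = scipy.ndimage.map_coordinates(grid, np.array([[0.5], [1.25], [2.75]]),
                                    order=1)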
Beispiel #44
0
def cutout_id_chem_map(yslice=slice(367,467), xslice=slice(114,214),
                       vrange=[51,60]*u.km/u.s, sourcename='e2',
                       filelist=glob.glob(paths.dpath('12m/cutouts/*e2e8*fits')),
                       source=None, radius=None,
                       molecular_database=ch3oh,
                       radiative_transitions=rt,
                       frqs=frqs,
                       chem_name='CH3OH',
                       shape=None, # check that shape matches slice
                      ):
    assert filelist

    maps = {}
    map_error = {}
    energies = {}
    degeneracies = {}
    frequencies = {}
    indices = {}

    # sanity check
    #shapes = [fits.getdata(fn).shape for fn in filelist]
    #assert len(set(shapes)) == 1

    for ii,fn in enumerate(ProgressBar(filelist)):
        if chem_name not in fn:
            log.debug("Skipping {0} because it doesn't have {1}".format(fn, chem_name))
            continue
        if 'temperature' in fn or 'column' in fn:
            continue

        if 'moment0' in fn:
            # there is a slight danger of off-by-one-pixel errors with the
            # cropping used here.  Ideally, we'd reproject...
            m0_full = fits.getdata(fn)
            #stddev = fits.getdata(fn.replace("moment0","madstd"))
            header = fits.getheader(fn)
            cutout = Cutout2D(m0_full, source, 2*radius, wcs=wcs.WCS(header))
            m0 = cutout.data
            #cutout_std = Cutout2D(stddev, source, 2*radius, wcs=wcs.WCS(header))
            #stddev = cutout_std.data
            try:
                beam = radio_beam.Beam.from_fits_header(header)
                jtok = beam.jtok(header['RESTFRQ']*u.Hz).value
            except Exception:
                jtok = 222. # approximated 0.32x0.34 at 225 GHz

            m0 = m0 * jtok
            # stddev = error in a single channel... not very accurate
            #stddev = stddev * jtok
            print("stddev for {0}".format(fn)) # this debug statement prevents abort traps
            stddev = mad_std(m0_full[np.isfinite(m0_full)]) * jtok
            header = cutout.wcs.to_header()
        else:
            cube_ = SpectralCube.read(fn)
            if shape:
                # have to have same shapes, otherwise pixel slices don't make sense
                assert cube_.shape == shape
            cube = cube_[:,yslice,xslice]
            bm = cube.beams[0]
            #jtok = bm.jtok(cube.wcs.wcs.restfrq*u.Hz)
            cube = cube.to(u.K, bm.jtok_equiv(cube.wcs.wcs.restfrq*u.Hz))

            slab = cube.spectral_slab(*vrange)
            cube.beam_threshold = 1
            #contguess = cube.spectral_slab(0*u.km/u.s, 40*u.km/u.s).percentile(50, axis=0)
            #contguess = cube.spectral_slab(70*u.km/u.s, 100*u.km/u.s).percentile(50, axis=0)
            mask = (cube.spectral_axis<40*u.km/u.s) | (cube.spectral_axis > 75*u.km/u.s)
            contguess = cube.with_mask(mask[:,None,None]).percentile(30, axis=0)
            stddev = cube.with_mask(mask[:,None,None]).apply_numpy_function(mad_std, axis=0)
            slabsub = (slab-contguess)
            slabsub.beam_threshold = 0.15
            m0 = slabsub.moment0()
            header = m0.hdu.header

        label = linere.search(fn).groups()[0]
        frq = name_to_freq[label]

        closest_ind = np.argmin(np.abs(frqs - frq))
        closest_key = list(radiative_transitions.keys())[closest_ind]
        closest_rt = radiative_transitions[closest_key]
        upperstate = molecular_database.data['States'][closest_rt.UpperStateRef]
        upperen = u.Quantity(float(upperstate.StateEnergyValue),
                             unit=upperstate.StateEnergyUnit)

        maps[label] = m0
        map_error[label] = stddev
        energies[label] = upperen
        degeneracies[label] = int(upperstate.TotalStatisticalWeight)
        indices[label] = closest_ind
        frequencies[label] = frq

    # make sure the dict indices don't change order
    energy_to_key = {v:k for k,v in energies.items()}
    order = sorted(energy_to_key.keys())
    keys = [energy_to_key[k] for k in order]

    cube = np.empty((len(maps),)+maps[label].shape)
    ecube = np.empty_like(cube)
    xaxis = u.Quantity([energies[k] for k in keys])
    xaxis = xaxis.to(u.erg, u.spectral()).to(u.K, u.temperature_energy())
    for ii,key in enumerate(keys):
        # divide by degeneracy
        cube[ii,:,:] = maps[key]
        ecube[ii,:,:] = map_error[key]

    frequencies = u.Quantity([frequencies[k] for k in keys])
    indices = [indices[k] for k in keys]
    degeneracies = [degeneracies[k] for k in keys]

    assert xaxis.size == cube.shape[0]

    return xaxis,cube,ecube,maps,map_error,energies,frequencies,indices,degeneracies,header
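# Side note on the unit conversion above: upper-state energies (often
# tabulated in cm^-1) are converted to Kelvin via E = k_B T; a standalone
# astropy equivalent:
import astropy.units as u

E_u = (500 / u.cm).to(u.erg, u.spectral()).to(u.K, u.temperature_energy())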
Beispiel #45
0
def process(P, F, stamp=False, plot=False):
    '''P is an int referring to the Par being processed.
    F is the int 110 or 160; it sets G to 102 or 141, respectively.
    If stamp is True, stamps of all the bright objects will be cut and put in a directory called stamps.
    If plot is True, a pdf showing the steps of the processing will be created and put in a directory called stages.'''

    if plot:
        logging.info('Plotting enabled')

    if F == 110:
        G = 102
    elif F == 160:
        G = 141
    else:
        raise ValueError('F==%i; should be 110 or 160' % F)

    imageDir = 'Par%i/DATA/DIRECT_GRISM' % P
    stampDir = '%s/F%i_stamps' % (imageDir, F)
    grismDir = 'Par%i/G%i_DRIZZLE' % (P, G)
    subgrismDir = '%s/subtracted' % grismDir
    modelDir = '%s/contaminationModels' % subgrismDir
    stageDir = '%s/processingStages' % subgrismDir
    detailsDir = '%s/detailedPlots' % subgrismDir
    if not os.path.exists(stampDir):
        os.mkdir(stampDir)
    if not os.path.exists(subgrismDir):
        os.mkdir(subgrismDir)
    if not os.path.exists(modelDir):
        os.mkdir(modelDir)
    if not os.path.exists(stageDir):
        os.mkdir(stageDir)
    if not os.path.exists(detailsDir):
        os.mkdir(detailsDir)

    #load image data
    image = '%s/F%iW_sci.fits' % (imageDir, F)
    img = fits.getdata(image, 0)
    header = fits.open(image)[0].header
    catalog = getCatalog('%s/fin_F%i.cat' % (imageDir, F))
    distortionFactor = .08 / 0.128254

    #separate list of galaxies by brightness cut-off
    magcutFaint = 24  #these are too faint to bother with
    magcutBright = 20  #point sources (stars) require extra-large stamps
    faint = catalog[catalog['MAG_F1153W'] > magcutFaint]
    stars = catalog[(catalog['MAG_F1153W'] <= magcutBright)
                    & (catalog['CLASS_STAR'] > 0.1)]
    bright = catalog[(catalog['MAG_F1153W'] <= magcutFaint)
                     & ((catalog['MAG_F1153W'] > magcutBright)
                        | (catalog['CLASS_STAR'] <= 0.1))]
    #sb = np.hstack((bright, stars))

    #cut stamps
    if stamp or not os.path.exists('%s/stamp1.fits' % stampDir):
        print('Cutting stamps')
        for entry in stars:
            cutStamp(img, header, stampDir, entry, catalog, 2)
        for entry in np.hstack((bright, faint)):
            cutStamp(img, header, stampDir, entry, catalog)

    for entry in bright:

        #find all entries that might overlap
        startTime = time.time()
        grism = '%s/aXeWFC3_G%i_mef_ID%i.fits' % (grismDir, G, entry['NUMBER'])
        if not os.path.exists(grism):
            print('Skipping object %i; file not found: %s' % (entry['NUMBER'],
                                                               grism))
        else:
            print('grism %i' % entry['NUMBER'])
            logging.info('grism %i' % entry['NUMBER'])
            gimg = fits.getdata(grism, 1)
            gerr = fits.getdata(grism, 2)
            gheader = fits.getheader(grism, 1)
            gdx = gheader['NAXIS1']
            gdy = gheader['NAXIS2']
            gyRef = gheader['CRPIX2']

            #find contaminants to have own profiles calculated
            cond1 = entry['NUMBER'] != bright['NUMBER']
            cond2 = entry['Y_IMAGE'] + gdy / 2. >= bright['Y_IMAGE'] - bright[
                'A_IMAGE']
            cond3 = entry['Y_IMAGE'] - gdy / 2. <= bright['Y_IMAGE'] + bright[
                'A_IMAGE']
            cond4 = entry['X_IMAGE'] - gdx / 2. <= bright['X_IMAGE'] + gdx / 2.
            cond5 = bright['X_IMAGE'] + gdx / 2. <= entry['X_IMAGE'] + gdx / 2.
            cond6 = entry['X_IMAGE'] - gdx / 2. <= bright['X_IMAGE'] - gdx / 2.
            cond7 = bright['X_IMAGE'] - gdx / 2. <= entry['X_IMAGE'] + gdx / 2.
            contams = bright[cond1 & cond2 & cond3 & ((cond4 & cond5) |
                                                      (cond6 & cond7))]
            print('contaminants: %s' % contams['NUMBER'])
            logging.info('contaminants: %s' % contams['NUMBER'])

            #get main profile
            stamp = '%s/stamp%i.fits' % (stampDir, entry['NUMBER'])
            simg = fits.getdata(stamp, 0)
            sheader = fits.getheader(stamp, 0)
            sdx = sheader['NAXIS1']
            sdy = sheader['NAXIS2']
            stripe = []  #linear profile starting from bottom of stamp
            for row in simg:
                val = 0
                for x, col in enumerate(row):
                    if sdx / 3. <= x <= sdx * 2 / 3.:  #use middle third of profile
                        val += col
                stripe.append(val)

            #extend stripe to height of grism
            speakLocationY = int(np.argmax(stripe))
            profile = np.zeros(gdy)
            profile[gyRef - speakLocationY:gyRef - speakLocationY +
                    sdy] = stripe

            #normalize and interpolate the profile
            profileInterp = interpolate.interp1d(range(gdy),
                                                 profile / gdy,
                                                 kind='linear',
                                                 bounds_error=False,
                                                 fill_value=0)
            pRange = np.arange(100 * (gdy - 1) + 1) / 100.
            profile = profileInterp(pRange)

            #get contaminating profiles and relative x range
            c_bounds, c_profiles, c_profileInterps = {}, {}, {}
            for contam in contams:
                c_stamp = '%s/stamp%i.fits' % (stampDir, contam['NUMBER'])
                c_simg = fits.getdata(c_stamp, 0)
                c_sheader = fits.getheader(c_stamp, 0)
                c_sdx = c_sheader['NAXIS1']
                c_sdy = c_sheader['NAXIS2']
                c_xoffset = (contam['X_IMAGE'] -
                             entry['X_IMAGE']) * distortionFactor
                c_yoffset = (contam['Y_IMAGE'] -
                             entry['Y_IMAGE']) * distortionFactor
                c_stripe = []  #linear profile starting from bottom of stamp
                for row in c_simg:
                    val = 0
                    for x, col in enumerate(row):
                        if c_sdx / 3. <= x <= c_sdx * 2 / 3.:  #use middle third of profile
                            val += col
                    c_stripe.append(val)

                #make the stripe a spline to allow subpixelling
                c_spline = interpolate.interp1d(range(c_sdy),
                                                c_stripe,
                                                kind='linear',
                                                bounds_error=False,
                                                fill_value=0)
                c_fineRange = np.arange(100 * c_sdy) / 100.
                c_splineVals = c_spline(c_fineRange)

                #extend stripe to height of grism
                c_speakLocationY = int(np.argmax(c_stripe))  # use data, not interpolation
                c_profile = np.zeros(100 * gdy)
                left = int(round((gyRef - c_speakLocationY + c_yoffset) * 100))
                right = int(
                    round(
                        (gyRef - c_speakLocationY + c_sdy + c_yoffset) * 100))
                c_profile[max(left, 0):min(right, gdy * 100)] = c_splineVals[
                    -min(left, 0):min(right, gdy * 100) - left]

                #interpolate the profile
                c_profileInterp = interpolate.interp1d(
                    np.arange(len(c_profile)) / 100.,
                    c_profile / gdy,
                    kind='linear',
                    bounds_error=False,
                    fill_value=0)
                c_profile = c_profileInterp(pRange)

                #determine to where in grism the contamination may extend
                #assumes tophat prior; use grism response curve eventually
                cdx = [
                    max(0, c_xoffset - contam['A_IMAGE']),
                    min(c_xoffset + gimg.shape[1] + contam['A_IMAGE'], gdx - 1)
                ]

                c_bounds[contam['NUMBER']] = cdx
                c_profiles[contam['NUMBER']] = c_profile
                c_profileInterps[contam['NUMBER']] = c_profileInterp

            #code to compress whole grism
            gimgMasked = np.ma.masked_array(gimg,
                                            mask=(gerr == 0),
                                            fill_value=np.NaN)
            gerrMasked = np.ma.masked_array(gerr,
                                            mask=(gerr == 0),
                                            fill_value=np.NaN)
            gColumnTotal = np.ma.average(gimgMasked, axis=-1)
            gErrColumnTotal = np.sqrt(
                np.ma.average(np.square(gerrMasked), axis=-1))
            gColumnTotalFine = interpolate.interp1d(range(gdy),
                                                    gColumnTotal,
                                                    kind='linear',
                                                    bounds_error=False,
                                                    fill_value=0)(pRange)
            gErrColumnTotalFine = interpolate.interp1d(range(gdy),
                                                       gErrColumnTotal,
                                                       kind='linear',
                                                       bounds_error=False,
                                                       fill_value=0)(pRange)
            gColumnTotalFine = np.ma.masked_array(
                gColumnTotalFine,
                mask=(gErrColumnTotalFine == 0),
                fill_value=np.NaN)
            gErrColumnTotalFine = np.ma.masked_array(
                gErrColumnTotalFine,
                mask=(gErrColumnTotalFine == 0),
                fill_value=np.NaN)

            #list all the profiles
            profiles = [profileInterp]
            for c in c_profileInterps:
                profiles.append(c_profileInterps[c])

            #find the pixel offsets for the whole object
            weights = [1, 0] * len(
                profiles)  #both amplitudes and dy pixel offsets can vary
            b = [(0, 10), (-2, 2)] * len(
                profiles
            )  #bound amplitudes to be nonnegative, pixel offset within 2
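            # weights is laid out as [amp_0, dy_0, amp_1, dy_1, ...], so the
            # bounds pair each amplitude with (0, 10) and each pixel offset
            # with (-2, 2), matching the minimization['x'][0::2] and [1::2]
            # slices used below.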
            st = time.time()
            minimization = optimize.minimize(residualVaryOffset,
                                             weights,
                                             args=(gColumnTotalFine,
                                                   gErrColumnTotalFine,
                                                   profiles, gdy),
                                             bounds=b)
            et = time.time()
            logging.info(minimization)
            logging.info('Time to optimize: %f' % (et - st))
            offsets = minimization['x'][1::2]
            if not minimization['success']:
                print('Overall minimization unsuccessful')

            #plot the data and models
            if plot:
                plotDir = '%s/%i' % (detailsDir, entry['NUMBER'])
                if not os.path.exists(plotDir):
                    os.mkdir(plotDir)
                totalSuccess = minimization['success']
                wpa = makeModels(profiles, minimization['x'][0::2], offsets,
                                 gdy)
                plotCompressedGrism(gimg, pRange, gColumnTotalFine, wpa,
                                    minimization, plotDir)

            #determine contributions due to each object via chi^2 minimization
            #each column point is the mean of the three points centered around its index
            contamGimg = np.ma.masked_array(np.zeros(gimg.shape),
                                            mask=np.ma.getmask(gimgMasked))
            subtractGimg = np.ma.copy(gimgMasked)
            for x in xrange(gdx):
                if x == 0:  #nothing left of first index
                    temp = np.ma.masked_array(
                        [gimgMasked.T[x], gimgMasked.T[x + 1]])
                    tempErr = np.ma.masked_array(
                        [gerrMasked.T[x], gerrMasked.T[x + 1]])
                elif x == gdx - 1:  #nothing right of last index
                    temp = np.ma.masked_array(
                        [gimgMasked.T[x - 1], gimgMasked.T[x]])
                    tempErr = np.ma.masked_array(
                        [gerrMasked.T[x - 1], gerrMasked.T[x]])
                else:
                    temp = np.ma.masked_array([
                        gimgMasked.T[x - 1], gimgMasked.T[x],
                        gimgMasked.T[x + 1]
                    ])
                    tempErr = np.ma.masked_array([
                        gerrMasked.T[x - 1], gerrMasked.T[x],
                        gerrMasked.T[x + 1]
                    ])

                gColumn = np.ma.average(temp.T, axis=-1)
                gErrColumn = np.sqrt(np.ma.average(tempErr.T, axis=-1))
                gColumnFine = interpolate.interp1d(xrange(gdy),
                                                   gColumn,
                                                   kind='linear',
                                                   bounds_error=False,
                                                   fill_value=0)(pRange)
                gErrColumnFine = interpolate.interp1d(xrange(gdy),
                                                      gErrColumn,
                                                      kind='linear',
                                                      bounds_error=False,
                                                      fill_value=0)(pRange)
                gColumnFine = np.ma.masked_array(gColumnFine,
                                                 mask=(gErrColumnFine == 0),
                                                 fill_value=np.NaN)
                gErrColumnFine = np.ma.masked_array(gErrColumnFine,
                                                    mask=(gErrColumnFine == 0),
                                                    fill_value=np.NaN)

                #determine which contaminants may be contaminating the grism
                profiles = [profileInterp]
                for c in contams['NUMBER']:
                    if c_bounds[c][0] <= x <= c_bounds[c][1]:
                        profiles.append(c_profileInterps[c])

                #determine the weights of each profile and calculate their contributions
                #only the amplitudes vary here; the pixel offsets stay
                #fixed at the values from the global fit
                weights = [1] * len(profiles)
                #bound amplitudes to be nonnegative
                b = [(0, 10)] * len(profiles)
                minimization = optimize.minimize(residualConstOffset,
                                                 weights,
                                                 args=(offsets, gColumnFine,
                                                       gErrColumnFine,
                                                       profiles, gdy),
                                                 bounds=b)
                if not minimization['success']:
                    print('Minimization unsuccessful at x=%i' % x)
                    logging.info('Minimization unsuccessful at x=%i' % x)
                    #raise Warning(minimization)

                weights = minimization['x']
                weightedProfArrays = makeModels(profiles, weights, offsets,
                                                gdy)
                logging.info('At x = %i: weights = %s, success = %s' %
                             (x, weights, minimization['success']))

                #plot the data and models
                if plot and not x % (gdx / 10):  #only plot 10 x values
                    plotIndividualWavelengths(pRange, gColumnFine,
                                              weightedProfArrays, minimization,
                                              x, plotDir)

                #subtract each contaminating profile from the grism
                for y in xrange(gimg.shape[0]):
                    for profNum, prof in enumerate(weightedProfArrays):
                        if profNum and gimg[y, x]:
                            subtractGimg[y, x] -= prof[::100][y]

                #accumulate the contamination model
                for y in xrange(gimg.shape[0]):
                    for prof in weightedProfArrays:
                        if gimg[y, x]:
                            contamGimg[y, x] += prof[::100][y]

            #save the new grism
            subtractFile = '%s/aXeWFC3_G%i_mef_ID%i_subtracted.fits' % (
                subgrismDir, G, entry['NUMBER'])
            fits.writeto(subtractFile,
                         data=subtractGimg.data,
                         header=gheader,
                         overwrite=True)

            if plot:
                contamFile = '%s/contam%i.fits' % (modelDir, entry['NUMBER'])
                fits.writeto(contamFile,
                             data=contamGimg.data,
                             header=gheader,
                             overwrite=True)
                plotFinalResults(entry, img, totalSuccess, profile, c_profiles,
                                 pRange, gimgMasked, gColumnTotalFine,
                                 contamGimg, wpa, subtractGimg, stageDir)

        endTime = time.time()
        logging.info('Total time for object %i: %f' %
                     (entry['NUMBER'], endTime - startTime))
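# Note: residualVaryOffset and residualConstOffset are not shown in this
# excerpt. A minimal, hypothetical sketch of what such a chi-square residual
# could look like, assuming each entry of `profiles` is an interpolant
# callable on the fine grid, `offsets` holds one dy shift per profile, and
# the same 100x oversampling as the prof[::100] indexing above (all names
# and the grid factor are assumptions, not the authors' code):
def residual_const_offset_sketch(weights, offsets, column, columnErr,
                                 profiles, gdy):
    pRange = np.linspace(0, gdy - 1, 100 * gdy)  # assumed fine grid
    model = np.zeros_like(pRange)
    for w, dy, prof in zip(weights, offsets, profiles):
        model += w * prof(pRange - dy)  # shifted, amplitude-scaled profile
    return np.ma.sum(((column - model) / columnErr) ** 2)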
Example #46
def refine_shifts(ROOT_DIRECT='f160w',
                  ALIGN_IMAGE='../../ACS/h_sz*drz_img.fits',
                  fitgeometry='shift', clean=True,
                  ALIGN_EXTENSION=0, shift_params=None,
                  toler=3, maxtoler=5, align_sdss_ds9=False,
                  verbose=False):
    """
refine_shifts(ROOT_DIRECT='f160w',
              ALIGN_IMAGE='../../ACS/h_sz*drz_img.fits',
              fitgeometry='shift', clean=True)
                
    Refine shifts by catalog matching an input multidrizzle image, 
    ROOT_DIRECT+'_drz.fits' to one or more alignment images
    """
        
    run = threedhst.prep_flt_files.MultidrizzleRun(ROOT_DIRECT.upper())
    
    ## radius for match is 2**toler.  Make it larger if fit comes out bad
    #toler, maxtoler = 3, 5  
    iter, MAXIT = 0, 5
    xrms, yrms = 100, 100
    if shift_params is not None:
        xshift, yshift, rot, scale = shift_params
        threedhst.showMessage('Using specified DRZ-frame shifts: %f %f %f %f' %(xshift, yshift, rot, scale))
    else:
        threedhst.showMessage('Aligning WCS to %s (%s)' %(threedhst.options['ALIGN_IMAGE'], fitgeometry))
        while ((xrms > 1) | (yrms > 1)) & (toler <= maxtoler) & (iter < MAXIT):
            iter = iter + 1
            xshift, yshift, rot, scale, xrms, yrms = threedhst.shifts.align_to_reference(
                        ROOT_DIRECT,
                        ALIGN_IMAGE,
                        fitgeometry=fitgeometry, clean=clean,
                        ALIGN_EXTENSION=ALIGN_EXTENSION,
                        toler=toler, skip_swarp=(toler > 3),
                        align_sdss_ds9=align_sdss_ds9, verbose=verbose)
            toler+=1

    #### shifts measured in DRZ frame.  Translate to FLT frame
    drz = pyfits.open(ROOT_DIRECT+'_drz.fits')
    #alpha = (180.-drz[1].header['PA_APER'])/360.*2*np.pi
    #### Get reference angle from first image in the ASN file
    asn = threedhst.utils.ASNFile(ROOT_DIRECT+'_asn.fits')
    alpha = (180.-pyfits.getheader(asn.exposures[0]+'_flt.fits',1)['PA_APER'])/360.*2*np.pi
    
    xsh = (xshift*np.cos(alpha)-yshift*np.sin(alpha))*float(run.scl)
    ysh = (xshift*np.sin(alpha)+yshift*np.cos(alpha))*float(run.scl)

    print('Final shift:', xsh, ysh, drz[1].header['PA_APER'])
    fp = open(ROOT_DIRECT+'_align.info','w')
    fp.write('%s %8.3f %8.3f %8.3f\n' %(ALIGN_IMAGE, xsh, ysh, rot)) 
    fp.close()
    
    #### Read the shiftfile
    shiftF = threedhst.shifts.ShiftFile(ROOT_DIRECT+'_shifts.txt')
            
    #### Apply the alignment shifts to the shiftfile
    shiftF.xshift = list(np.array(shiftF.xshift)-xsh)
    shiftF.yshift = list(np.array(shiftF.yshift)-ysh)
    shiftF.rotate = list((np.array(shiftF.rotate)+rot) % 360)
    shiftF.scale = list(np.array(shiftF.scale)*scale)
    
    shiftF.write(ROOT_DIRECT+'_shifts.txt')
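# For reference, a minimal sketch of the DRZ-to-FLT shift rotation used
# above, assuming shifts measured in the drizzled frame, the PA_APER
# position angle in degrees, and the Multidrizzle scale factor `scl`:
def drz_to_flt_shift(xshift, yshift, pa_aper, scl):
    alpha = np.deg2rad(180. - pa_aper)
    xsh = (xshift * np.cos(alpha) - yshift * np.sin(alpha)) * scl
    ysh = (xshift * np.sin(alpha) + yshift * np.cos(alpha)) * scl
    return xsh, ysh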
def get_many_k93model():

    SEDfile_dir = os.path.join(top_pysynphot_data_dir, dir_nostar,
                               dir_submodels[8])
    all_sed = []

    # the metallicity subfolders of the Kurucz 1993 model grid
    subdirs = ['km01', 'km02', 'km03', 'km05', 'km10', 'km20',
               'km25', 'km30', 'km35', 'km40', 'km45', 'km50',
               'kp00', 'kp01', 'kp02', 'kp03', 'kp05', 'kp10']

    # collect the model files per subfolder, dropping the AA_README files
    filelist = []
    filelist_group = []
    for subdir in subdirs:
        files = os.listdir(os.path.join(SEDfile_dir, subdir))
        files.remove('AA_README')
        filelist += files
        filelist_group += [
            os.path.join(subdir, f) for f in files if f.endswith('.fits')
        ]

    fits_files = filelist_group

    obj_headers = []
    obj_files = []
    for filename in fits_files:
        if re.search('fits', filename):  #example of filename filter
            fullfilename = os.path.join(SEDfile_dir, filename)
            hdr = fits.getheader(fullfilename)
            obj_headers.append(hdr)
            obj_files.append(filename)

    obj_temperatures = []
    obj_log_z_all = []
    for hdr in obj_headers:
        obj_temperatures.append(hdr['TEFF'])
        obj_log_z_all.append(hdr['LOG_Z'])

    obj_names2 = []
    for thefile in fits_files:
        #thenames=re.findall('^bk_([a-z][0-9]+).fits$',thefile)
        thenames = re.findall('([a-z].+_[0-9].+).fits$', thefile)
        if len(thenames) > 0:
            obj_names2.append(thenames[0])
        else:
            print('bad file', thefile)

    obj_names = obj_names2
    obj_files = filelist_group

    objames_and_objfiles = zip(obj_names, obj_files)
    objames_and_objtemp = zip(obj_names, obj_temperatures)
    objtemp_and_objlogz = zip(obj_temperatures, obj_log_z_all)
    #all_logg=np.array([0.0,1.,2.,3.,4.])  # gravity
    all_logg = np.array([0.0])  # gravity

    for temp, logz in objtemp_and_objlogz:
        #Icat(model,temp,logz,logg)
        for logg in all_logg:
            sed = S.Icat('k93models', temp, logz, logg)
            sed.convert('flam')  # to be sure every spectrum is in flam unit
            all_sed.append(sed)

    return all_sed
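# A minimal usage sketch: pysynphot's Icat interpolates the Kurucz 1993
# grid at a given (Teff, log Z, log g). The parameter values below are
# illustrative assumptions, not values taken from the code above.
def k93_example_sed():
    sed = S.Icat('k93models', 5750., 0.0, 4.44)  # Teff [K], log Z, log g
    sed.convert('flam')  # same unit convention as get_many_k93model()
    return sed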
Example #48
def telescope(img):
    from astropy.io import fits as pyfits

    telescopelist = [
        'ekar', 'pennar', 'TNG', 'NOT', 'ACAM', 'WHT', 'lo', 'wise', 'ca',
        'FORS1', 'FORS2', 'NTT', 'lp', 'rem', 'CI', 'sub', 'mag',
        'FTN', 'danish', 'SOFI', 'NICS', 'ctio', 'montsec', 'hct', 'trp',
        'k61', 'b155', 'DFOT', 'ST'
    ]
    geth = pyfits.getheader(img)
    try:
        _telescope = geth['TELESCOP']
        if 'Ekar' in _telescope:
            _telescope = 'ekar'
        elif 'Reflector' in _telescope:
            _telescope = 'ekar'
        elif 'Schmidt' in _telescope:
            _telescope = 'sch'
        elif 'bigelow' in _telescope:
            _telescope = 'b155'
        elif 'Kuiper' in _telescope:
            _telescope = 'k61'
        elif 'ASIAGO' in _telescope:
            _telescope = 'pennar'
        elif 'TNG' in _telescope:
            if 'NICS' in geth['INSTRUME']:
                _telescope = 'NICS'
            else:
                _telescope = 'TNG'
        elif 'NOT' in _telescope:
            _telescope = 'NOT'
        elif 'Danish' in _telescope:
            _telescope = 'danish'
        elif 'WHT' in _telescope:
            _instrum = geth['INSTRUME']
            if _instrum == 'ACAM':
                _telescope = 'ACAM'
            else:
                _telescope = 'WHT'
        elif 'Orzale' in _telescope:
            _telescope = 'lo'
        elif '40 INCH' in _telescope:
            _telescope = 'wise'
        elif 'CA-2.2' in _telescope:
            _telescope = 'ca'
        elif _telescope == 'ca':
            _telescope = 'ca'
        elif 'Subaru' in _telescope:
            _telescope = 'sub'
        elif 'K.A.I.T.' in _telescope:
            _telescope = 'kait'
        elif 'Clay_Mag' in _telescope:
            _telescope = 'mag'
        elif 'ESO-VLT-U1' in _telescope:
            _telescope = 'FORS2'
        elif 'ESO-VLT-U2' in _telescope:
            _telescope = 'FORS1'
        elif 'ESO-NTT' in _telescope:
            if 'SOFI' in geth['INSTRUME']:
                _telescope = 'SOFI'
            else:
                _telescope = 'NTT'
        elif 'Wendelstein' in _telescope:
            _telescope = 'wend'
        elif 'Liverpool' in _telescope:
            _telescope = 'lp'
        elif 'REM' in _telescope:
            _telescope = 'rem'
        elif 'AZT-24' in _telescope:
            _telescope = 'CI'
        elif 'Prompt' in _telescope:
            _telescope = 'prompt'
        elif 'Faulkes' in _telescope:
            if 'North' in _telescope:
                _instrume = geth['instrume']
                if _instrume == 'EM01':
                    _telescope = 'mer'
                else:
                    _telescope = 'FTN'
            elif 'South' in _telescope:
                _telescope = 'FTS'
        elif 'PS1' in _telescope:
            _telescope = 'PS1'
        elif 'SWIFT' in _telescope:
            _telescope = 'swift'
        elif 'Dutch' in _telescope:
            _telescope = 'dutch'
        elif 'ct13m' in _telescope:
            _telescope = 'ctio'
        elif 'Montsec' in _telescope:
            _telescope = 'montsec'
        elif 'TRAPPIST' in _telescope:
            _telescope = 'trp'
        elif 'Sampur' in _telescope:
            _telescope = 'sampur'
        elif 'other' in _telescope:
            _telescope = 'other'
        elif _telescope in ['1m0-04', '1m0-05', '1m0-08', '1m0-09']:
            _telescope = 'lsc'
        elif _telescope in ['0m4-14', '0m4-15', '0m4-12', '0m4-13']:
            _telescope = 'lco0m4'
        elif _telescope in ['DFOT']:
            _telescope = 'DFOT'
        elif _telescope in ['ST']:
            _telescope = 'ST'
        elif _telescope in telescopelist:
            pass
        else:
            try:
                _telescope = geth['OBSERVAT']
                if 'Prompt' in _telescope:
                    _telescope = 'prompt'
                elif 'Meckering' in _telescope:
                    _telescope = 'Meckering'
                else:
                    _telescope = ''
            except:
                _telescope = ''
    except:
        _telescope = ''

    if _telescope == '':
        try:
            _instrume = geth['INSTRUME']
            if 'HFOSC' in _instrume:
                _telescope = 'hct'
            else:
                _telescope = ''
        except:
            _telescope = ''
    if _telescope == '':
        try:
            _telescope = geth['QUBATELE']
            if 'other' in _telescope:
                _telescope = 'other'
            else:
                _telescope = ''
        except:
            if pyfits.open(img)[0].header.get('PSCAMERA') == 'GPC1':
                _telescope = 'PS1'
            else:
                _telescope = ''
    if _telescope == '':
        print('WARNING: Telescope not defined !!!')
    return _telescope
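# A hedged sketch of a table-driven alternative to the long elif chain
# above: map TELESCOP substrings to telescope codes (only a few mappings
# are reproduced here) and scan the table in order, since order matters
# for overlapping substrings.
TELESCOP_SUBSTRINGS = [
    ('Ekar', 'ekar'), ('Reflector', 'ekar'), ('Schmidt', 'sch'),
    ('Kuiper', 'k61'), ('Liverpool', 'lp'), ('TRAPPIST', 'trp'),
]

def telescope_lookup(telescop_value):
    for key, code in TELESCOP_SUBSTRINGS:
        if key in telescop_value:
            return code
    return ''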
def get_all_calspec_hd():

    SEDfile_dir = os.path.join(top_pysynphot_data_dir, 'calspec')
    filelist = os.listdir(SEDfile_dir)
    fits_files = [f for f in filelist if f.endswith('.fits')]

    # extract header and filename
    star_header = []
    star_file_calspec = []
    for filename in filelist:
        #example of filename filter: STIS spectra of HD stars only
        if (re.search('fits', filename) and re.search('hd', filename)
                and re.search('stis', filename)):
            fullfilename = os.path.join(SEDfile_dir, filename)
            hdr = fits.getheader(fullfilename)
            star_header.append(hdr)
            star_file_calspec.append(filename)

    # extract starname (entry 433 is skipped because of a bad header)
    star_names = []
    for index, hdr in enumerate(star_header):
        if index != 433:
            star_names.append(hdr['TARGETID'])
        else:
            print('>>>>>> skip file #', index, 'BAD HEADER')
            print('>>>>>> filename =', star_file_calspec[index])
            print(hdr)

    # sort names by frequency and deduplicate
    star_names_sorted = sorted(star_names, key=star_names.count, reverse=True)
    star_names_sorted_upper = [s.upper() for s in star_names_sorted]
    star_names_set = set(star_names_sorted_upper)

    # build dictionary of filenames (several files per star name)
    StarDict = {}
    for star in star_names_set:
        print(star, ': \n')
        star_set_of_file = []
        tag_upper = '^' + star.upper() + '*'
        tag_lower = '^' + star.lower() + '*'

        for thefile in fits_files:
            if re.search(tag_upper, thefile) or re.search(tag_lower, thefile):
                star_set_of_file.append(thefile)
        #StarDict[star]=sorted(star_set_of_file,key=star_names.count,reverse=True)
        StarDict[star] = sorted(star_set_of_file, key=star_names.count)
        print(StarDict[star], '\n')

    # SED
    all_sed = []
    for keystar in StarDict:
        the_files = StarDict[keystar]
        if (len(the_files)) > 0 and keystar != 'SUN_REFERENCE':
            #print sorted(the_files,reverse=True)

            selected_file = the_files[0]
            selected_fullfile = os.path.join(SEDfile_dir, selected_file)

            sed = S.FileSpectrum(selected_fullfile)
            if (max(sed.flux) >
                    0):  # remove empty fluxes because of bad parameters
                all_sed.append(sed)
    return all_sed
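# A minimal usage sketch for a single CALSPEC spectrum; the file path is a
# hypothetical placeholder. get_all_calspec_hd() applies the same
# S.FileSpectrum call to every file it selects.
def load_one_calspec(fullfile='/path/to/calspec/somestar_stis_001.fits'):
    sed = S.FileSpectrum(fullfile)  # hypothetical example file
    return sed if max(sed.flux) > 0 else None  # drop empty fluxes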
Example #50
def poet_dataread(event, type=0, log=None):
    """
    This function reads a set of IRAC AORS, (or IRAC Subarray AORS),
       sorting by dither position, if any.

    Parameters:
    ----------
    event : An event object.
    type  : integer
            Specifies the type of data to read.
            0 = data, 1 = precalibration data, 2 = postcalibration data.
    log   : A logedit object that keeps the log.

    Outputs:
    -------
    data  : [maxnimpos, ny, nx, npos] float array containing the data
            frames, sorted into dither positions.
    head  : header data
    uncd  : [maxnimpos, ny, nx, npos] float array, uncertainties
    bdmskd: [maxnimpos, ny, nx, npos] int array, per-pixel data flag
    nimpos: array like
            array containing the number of frames in each position.
    fp:     FrameParameters object containing [npos, maxnimpos] double arrays
            of per-frame parameters.

    Example:
    -------

    Modification History:
    --------------------
    Written by:	Joseph Harrington, Cornell.
    2005-09-16  [email protected]
    2005-10-26  jh        Fixed frame times.
    2005-10-27	jh        Moved calculation of some constants out of the
		          routine.  Filled in header.  Corrected some
		          array datatypes.
    2005-11-25	jh        Converted to using FP array.
    2006-01-04	jh        Header tweak.
    2006-03-20  jh        Added zodi, ism, cib header values.
    2007-03-07  khorning  Adapted program to use for non-subarray data
    2007-07-15  jh        Made nimpos be a long, not integer, array.
    2010-08-24  patricio  Converted to python.
    2010-10-27  patricio  Comments added.

  """
    # General variables
    dpref = event.dpref  # data directory prefix
    expadj = event.expadj  # id number of first image
    ndcenum = event.ndcenum  # number of dcenum
    npos = event.npos  # number of positions
    nnod = event.nnod  # number of nodding positions
    #fpref     = event.fpref          # file names prefix
    pipev = event.pipev  # spitzer pipeline version
    bcddir = event.inst.bcddir  # directory containing bcd files
    bcdsuf = event.inst.bcdsuf  # bcd file suffix
    buncsuf = event.inst.buncsuf  # uncertainties file suffix
    #bdmsksuf  = event.inst.bdmsksuf  # badpixelmask file suffix
    brmsksuf = event.inst.brmsksuf  # badpixelmask file suffix
    masksuf = event.masksuf  # badpixelmask file suffix
    nx = event.nx  # image x size
    ny = event.ny  # image y size
    nz = event.nz  # number of subarrays in datafile
    nh = event.nh  # number of header lines
    framtime = event.framtime  # frame time in seconds
    bcdlist = event.bcdfiles  # lists of files

    # AORs/cal AORs variables
    aorname = event.aorname[np.where(event.aortype == type)]
    if type != 0:
        naor = event.calnaor
        nexpid = event.calnexpid
        maxnimpos = event.calmaxnimpos
        nmcyc = event.calnmcyc
    else:
        naor = event.naor
        nexpid = event.nexpid
        maxnimpos = event.maxnimpos
        nmcyc = event.nmcyc
        nscyc = event.nscyc

    # Allocate space for returned arrays
    headerdtype = 'a' + str(nh * 81)
    maxnimpos = int(maxnimpos)
    head = np.zeros((int(maxnimpos / nz), npos), dtype=headerdtype)
    data = np.zeros((maxnimpos, ny, nx, npos), dtype=float)
    uncd = np.zeros((maxnimpos, ny, nx, npos), dtype=float)
    bdmskd = np.zeros((maxnimpos, ny, nx, npos), dtype=int)
    brmskd = np.zeros((maxnimpos, ny, nx, npos), dtype=int)

    # Allocate space for the frame parameters
    fp = FrameParameters()
    fpsize = np.zeros((npos, maxnimpos))
    fp.frmobs = np.copy(fpsize)  # sequential frame number
    fp.pos = np.copy(fpsize)  # position number
    fp.aor = np.copy(fpsize)  # sequential AOR number
    fp.expid = np.copy(fpsize)  # EXPosure ID
    fp.dce = np.copy(fpsize)  # Data Collection Event
    fp.subarn = np.copy(fpsize)  # subarray frame number
    fp.time = np.copy(fpsize)  # frame mid-time, seconds J2000.0
    fp.bmjd = np.copy(fpsize)  # frame mid-time, BJD-2400000.5
    fp.zodi = np.copy(fpsize)  # zodiacal light estimate, see header comment
    fp.ism = np.copy(fpsize)  # interstellar medium estimate,see head comment
    fp.cib = np.copy(fpsize)  # cosmic infrared background,see header comment
    fp.afpat2b = np.copy(fpsize)  # temperatures, K, see header comment
    fp.afpat2e = np.copy(fpsize)
    fp.ashtempe = np.copy(fpsize)
    fp.atctempe = np.copy(fpsize)
    fp.acetempe = np.copy(fpsize)
    fp.apdtempe = np.copy(fpsize)
    fp.acatmp1e = np.copy(fpsize)
    fp.acatmp2e = np.copy(fpsize)
    fp.acatmp3e = np.copy(fpsize)
    fp.acatmp4e = np.copy(fpsize)
    fp.acatmp5e = np.copy(fpsize)
    fp.acatmp6e = np.copy(fpsize)
    fp.acatmp7e = np.copy(fpsize)
    fp.acatmp8e = np.copy(fpsize)
    # mips frame parameters
    fp.cmd_t_24 = np.copy(fpsize)
    fp.ad24tmpa = np.copy(fpsize)
    fp.ad24tmpb = np.copy(fpsize)
    fp.acsmmtmp = np.copy(fpsize)
    fp.aceboxtm = np.copy(fpsize)
    fp.pxscl2 = np.copy(fpsize)
    fp.pxscl1 = np.copy(fpsize)

    fp.heady = np.copy(fpsize)
    fp.headx = np.copy(fpsize)
    fp.filename = np.zeros((npos, maxnimpos), dtype='S150')

    nimpos = np.zeros(npos, np.long)

    # conveniences
    salist = np.arange(nz)
    sadind = np.arange(nz, dtype=np.double)

    # position of the star
    sky = [[event.ra * 180 / np.pi, event.dec * 180 / np.pi]]

    # dictionary to get position in MIPS
    mirind = {
        1929.: 0,
        2149.5: 1,
        1907.5: 2,
        2128.: 3,
        1886.: 4,
        2106.5: 5,
        1864.5: 6
    }

    # Write to log first line
    if log != None:
        log.writelog('  aor  expid  dcenum   pos')
    else:
        print('  aor  expid  dcenum   pos')

    # pattern to find     expid      dcenum
    pattern = re.compile("_([0-9]{4})_([0-9]{4})_")

    # Obtain data
    for aor in np.arange(naor):
        direc = dpref + aorname[aor] + bcddir
        bcd = bcdlist[aor]

        for i in np.arange(len(bcd)):
            # Read data
            try:
                dataf = pf.getdata(direc + bcd[i])
                bcdhead = pf.getheader(direc + bcd[i])
            except:  # If a file doesn't exist, skip to next file.
                log.writelog(direc + bcd[i] + " File not found!")
                continue

            try:  # Read uncertainity and mask files
                # Replace suffix in bcd file to get the corresponding file.
                uncfile = re.sub(bcdsuf, buncsuf, direc + bcd[i])
                uncf = pf.getdata(uncfile)
                mskfile = re.sub(bcdsuf, masksuf, direc + bcd[i])
                bdmskf = pf.getdata(mskfile)
            except:
                bdmskf = np.ones((nz, ny, nx), np.long)

            try:  # Mips
                brmskfile = re.sub(bcdsuf, brmsksuf, direc + bcd[i])
                brmskf = pf.getdata(brmskfile)
            except:
                brmskf = -np.ones((nz, ny, nx), np.long)

            # Obtain expid and dcenum
            index = pattern.search(bcd[i])
            expid = int(index.group(1))
            dcenum = int(index.group(2))

            # Do I really need this?
            if np.size(bdmskf) == 1:
                bdmskf = -np.ones((nz, ny, nx), np.long)
            if np.size(brmskf) == 1:
                brmskf = -np.ones((nz, ny, nx), np.long)

            # Find dither position
            try:
                pos = bcdhead['DITHPOS'] - 1
            except:
                pos = 0  # No dither position in stare data
            if event.inst.name == 'irs':
                pos = expid % npos
            elif event.inst.name == 'mips':
                nod = expid % nnod
                pos = nod * nscyc + mirind[bcdhead['CSM_PRED']]

            be = nimpos[pos]  # beginning
            en = nimpos[pos] + nz  # end

            # Store data
            data[be:en, :, :, pos] = dataf.reshape((nz, ny, nx))
            uncd[be:en, :, :, pos] = uncf.reshape((nz, ny, nx))
            bdmskd[be:en, :, :, pos] = bdmskf.reshape((nz, ny, nx))
            brmskd[be:en, :, :, pos] = brmskf.reshape((nz, ny, nx))
            # All the single numbers per frame that we care about
            fp.frmobs[pos, be:en] = np.sum(nimpos) + salist
            fp.pos[pos, be:en] = pos
            fp.aor[pos, be:en] = aor
            fp.expid[pos, be:en] = expid
            fp.dce[pos, be:en] = dcenum
            fp.subarn[pos, be:en] = salist
            fp.bmjd[pos,
                    be:en] = bcdhead['BMJD_OBS'] + framtime * (sadind +
                                                               0.5) / 86400.
            # ccampo 2011/3/18: changed to UTC from SCLK to avoid timing inconsistencies
            try:
                fp.time[
                    pos,
                    be:en] = bcdhead['UTCS_OBS'] + framtime * (sadind + 0.5)
            except:
                pass
            try:
                fp.zodi[pos, be:en] = bcdhead['ZODY_EST']
                fp.ism[pos, be:en] = bcdhead['ISM_EST']
                fp.cib[pos, be:en] = bcdhead['CIB_EST']
                fp.afpat2b[pos, be:en] = bcdhead['AFPAT2B']
                fp.afpat2e[pos, be:en] = bcdhead['AFPAT2E']
                fp.ashtempe[pos, be:en] = bcdhead['ASHTEMPE'] + 273.0
                fp.atctempe[pos, be:en] = bcdhead['ATCTEMPE'] + 273.0
                fp.acetempe[pos, be:en] = bcdhead['ACETEMPE'] + 273.0
                fp.apdtempe[pos, be:en] = bcdhead['APDTEMPE'] + 273.0
                fp.acatmp1e[pos, be:en] = bcdhead['ACATMP1E']
                fp.acatmp2e[pos, be:en] = bcdhead['ACATMP2E']
                fp.acatmp3e[pos, be:en] = bcdhead['ACATMP3E']
                fp.acatmp4e[pos, be:en] = bcdhead['ACATMP4E']
                fp.acatmp5e[pos, be:en] = bcdhead['ACATMP5E']
                fp.acatmp6e[pos, be:en] = bcdhead['ACATMP6E']
                fp.acatmp7e[pos, be:en] = bcdhead['ACATMP7E']
                fp.acatmp8e[pos, be:en] = bcdhead['ACATMP8E']
            except:
                pass

            try:
                fp.pxscl2[pos, be:en] = np.abs(bcdhead['PXSCAL2'])
                fp.pxscl1[pos, be:en] = np.abs(bcdhead['PXSCAL1'])
                # MIPS-specific temperatures and commanded set point
                fp.cmd_t_24[pos, be:en] = bcdhead['CMD_T_24']
                fp.ad24tmpa[pos, be:en] = bcdhead['AD24TMPA']
                fp.ad24tmpb[pos, be:en] = bcdhead['AD24TMPB']
                fp.acsmmtmp[pos, be:en] = bcdhead['ACSMMTMP']
                fp.aceboxtm[pos, be:en] = bcdhead['ACEBOXTM'] + 273.0
            except:
                pass

            # Store filename
            fp.filename[pos, be:en] = direc + bcd[i]

            # Store header
            head[int(nimpos[pos] / nz), pos] = str(bcdhead)

            # Header position of the star:
            bcdhead["NAXIS"] = 2

            wcs = pw.WCS(bcdhead, naxis=2)
            pix = wcs.wcs_world2pix(sky, 0)
            fp.headx[pos, be:en] = pix[0][0]
            fp.heady[pos, be:en] = pix[0][1]

            # Print to log and screen:
            if log != None:
                log.writelog('%4d' % aor + '%7d' % expid + '%7d' % dcenum +
                             '%7d' % pos)
            else:
                print('%4d' % aor + '%7d' % expid + '%7d' % dcenum +
                      '%7d' % pos)

            nimpos[pos] += nz

    # frame tags in fp

    # where there exist data
    fp.exist = np.zeros((npos, maxnimpos), np.long)
    for pos in np.arange(npos):
        fp.exist[pos, 0:nimpos[pos]] = 1

    fp.im = np.copy(fpsize)  # Frame within position
    for pos in np.arange(npos):
        fp.im[pos, 0:nimpos[pos]] = np.arange(nimpos[pos], dtype=np.double)

    if event.inst.name != 'mips':
        fp.cycpos = np.trunc(fp.frmobs / (npos * nmcyc * nz))  # Cycle number
        fp.visobs = np.trunc(fp.frmobs /
                             (nmcyc * nz))  # Visit number within obs. set
        fp.frmvis = fp.im % (nmcyc * nz)  # Frame within visit

    else:
        fp.cycpos = np.trunc(fp.frmobs / (2 * ndcenum))  # Cycle number
        fp.visobs = np.trunc(fp.frmobs /
                             ndcenum)  # Visit number within obs. set
        fp.frmvis = np.trunc(fp.frmobs % ndcenum)  # Frame within visit

        # Image scale:
        for pos in np.arange(npos):
            last = nimpos[pos]
            if np.all(fp.pxscl1[pos, 0:last] == fp.pxscl1[pos, 0]):
                event.posscl[1, pos] = np.abs(fp.pxscl1[pos, 0])
            if np.all(fp.pxscl2[pos, 0:last] == fp.pxscl2[pos, 0]):
                event.posscl[0, pos] = np.abs(fp.pxscl2[pos, 0])

    # Update event
    event.data = data
    event.uncd = uncd
    event.bdmskd = bdmskd
    event.brmskd = brmskd
    event.head = head
    event.fp = fp
    event.nimpos = nimpos

    return
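# A minimal sketch of the header-to-pixel step used above, assuming `pw` is
# astropy.wcs (as the pw.WCS call in poet_dataread suggests) and target
# coordinates in degrees:
def star_pixel_position(hdr, ra_deg, dec_deg):
    w = pw.WCS(hdr, naxis=2)
    # second argument 0 selects 0-based (numpy-style) pixel coordinates
    x, y = w.wcs_world2pix([[ra_deg, dec_deg]], 0)[0]
    return x, y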
import glob
import json
import os
import shlex
import subprocess

from astropy.io import fits

conf = json.load(open('config.json'))
root = conf['root']
os.chdir(root)
#date_list = glob.glob('20190402')
date_list = glob.glob('{}/{}/{}/'.format(conf['fakes'], conf['field'], conf['date']))
date_list = list(filter(lambda x: not x.endswith('gz'), date_list))
for i in date_list:
    os.chdir(root+i)
    #field_list = glob.glob('A0b')
    field_list = glob.glob('.')
    for j in field_list:
        #print(j)
        os.chdir(root+i+'/'+j)
        ccd_list = glob.glob(conf["ccd"])
        #ccd_list = ['CCD62']
        for k in ccd_list:
            os.chdir(root+i+'/'+j+'/'+k)
            print(root+i+'/'+j+'/'+k)
            # note: str.rstrip() strips a character *set*, not a suffix,
            # so slice the suffix off instead
            fits_list = [fn[:-len('.diff.fits')]
                         for fn in glob.glob('*.diff.fits')]
            sub_fits = [fn[:-len('.diff.sub.fits')]
                        for fn in glob.glob('*.diff.sub.fits')]
            for l in fits_list:
                print(l)
                #zp = fits.getheader('{}.diff.fits'.format(l), 1)['MAGZERO']
                zp = fits.getheader('{}.diff.fits'.format(l), 0)['MAGZERO']
                cat = '{}.diff.cat'.format(l)
                subimg = '{}.diff.sub.fits'.format(l)
                command = ('sex -DETECT_THRESH 5.0 -CATALOG_NAME {} '
                           '-CHECKIMAGE_TYPE OBJECTS -CHECKIMAGE_NAME {} '
                           '-MAG_ZEROPOINT {} {}.diff.fits').format(
                               cat, subimg, zp, l)
                p0 = subprocess.call(shlex.split(command))
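# A hedged variant of the SExtractor call above: building the argument list
# directly avoids the format/shlex round trip (same flags as the snippet;
# the `sex` executable is assumed to be on the PATH).
def run_sextractor(image, cat, subimg, zp):
    cmd = ['sex',
           '-DETECT_THRESH', '5.0',
           '-CATALOG_NAME', cat,
           '-CHECKIMAGE_TYPE', 'OBJECTS',
           '-CHECKIMAGE_NAME', subimg,
           '-MAG_ZEROPOINT', str(zp),
           image]
    return subprocess.call(cmd)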
Example #52
master_flat_path = 'outputs/masterflat_20160708.fits'
master_dark_path = 'outputs/masterdark_20160708.fits'

flat = fits.getdata(master_flat_path)
dark = fits.getdata(master_dark_path)

from skimage.feature import peak_local_max

mid = image_shape[0] // 2

times = []
with ProgressBar(len(paths)) as bar:
    for i, path in enumerate(paths):

        raw_image = fits.getdata(path) / flat
        times.append(fits.getheader(path)['JD'])
        # mask, cleaned_image = detect_cosmics(raw_image)
        #cleaned_image = raw_image
        # smoothed_image = gaussian_filter(cleaned_image, 10)

        coordinates = peak_local_max(raw_image,
                                     min_distance=10,
                                     num_peaks=1,
                                     exclude_border=10)
        y_mean = int(coordinates[:, 1].mean())
        x_mean = int(coordinates[:, 0].mean())

        # y, x = np.unravel_index(np.argmax(smoothed_image), smoothed_image.shape)

        firstroll = np.roll(
            raw_image,
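# (the snippet is truncated here) -- a hedged sketch of the usual
# recentering idiom with np.roll, assuming the peak found above should land
# on the image midpoint `mid`; the axis conventions follow the x_mean
# (rows) / y_mean (columns) naming used above:
#
#   recentered = np.roll(np.roll(raw_image, mid - x_mean, axis=0),
#                        mid - y_mean, axis=1)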
Example #53
    _site=option.site
    _force=option.force

    if '-' not in str(epoch): 
        epoch0=datetime.date(int(epoch[0:4]),int(epoch[4:6]),int(epoch[6:8]))
        listepoch=[re.sub('-','',str(epoch0))]
    else:
        epoch1, epoch2 = epoch.split('-')
        start=datetime.date(int(epoch1[0:4]),int(epoch1[4:6]),int(epoch1[6:8]))
        stop=datetime.date(int(epoch2[0:4]),int(epoch2[4:6]),int(epoch2[6:8]))
        listepoch=[re.sub('-','',str(i)) for i in [start + datetime.timedelta(days=x) for x in range(0,(stop-start).days)]]


    directory = agnkey.util.workingdirectory+'floydsraw2/'+_site
    for epoch in listepoch:
      path = directory+'/'+epoch+'/raw/'
      imglist=glob.glob(path+'*fits')
      print(imglist)

      # ingest
      #ingestfloyds(imglist,_force)

      # make plot
      for img in imglist:
        hdr = pyfits.getheader(img)
        if hdr.get('OBSTYPE') == 'SPECTRUM':
          html = makeplot(img, False)
          print(html)


Example #54
def fix_file(row, input_folder=None, check_corrector=None, force_version=None, tpf_rootdir=None):

	logger = logging.getLogger(__name__)
	fname = os.path.join(input_folder, row['lightcurve'])

	fname_original = regex_fileend.sub('.original.fits.gz', fname)
	if os.path.exists(fname_original):
		raise RuntimeError(f"ORIGINAL exists: {fname_original}")

	dataval = int(row['dataval'])
	modification_needed = False

	m = regex_filename.match(os.path.basename(fname))
	if not m:
		raise RuntimeError("RegEx doesn't match!")

	starid = int(m.group(1))
	sector = int(m.group(2))
	camera = int(m.group(3))
	ccd = int(m.group(4))
	cadence = int(m.group(5))
	datarel = int(m.group(6))
	version = int(m.group(7))
	corrector = m.group(8)

	# Basic checks:
	if starid != row['starid']:
		raise RuntimeError("STARID")
	if sector != row['sector']:
		raise RuntimeError("SECTOR")
	if camera != row['camera']:
		raise RuntimeError("CAMERA")
	if ccd != row['ccd']:
		raise RuntimeError("CCD")
	#if cadence != row['cadence']:
	#	raise RuntimeError("CADENCE")
	if force_version is not None and version != force_version:
		#modification_needed = True
		raise RuntimeError("Version mismatch!")
	if corrector != check_corrector:
		raise RuntimeError("CORRECTOR")

	# Do we really need to modify the FITS file?
	openfile_needed = True # FORCE modification check!
	fix_wcs = False

	# We need to open if there is a dataval to add to the header
	if dataval > 0:
		openfile_needed = True

	# Fix for bug with WCS being incorrect in TPF lightcurves
	if cadence == 120 and version <= 5:
		openfile_needed = True
		fix_wcs = True

	# Because of the problem with multiple MJD-OBS keywords
	# in FITS headers, we have to check files in these cases.
	# TODO: Modify this when we know the CAUSE of this SYMPTOM
	if version <= 5:
		openfile_needed = True

	# We need to open the ensemble files to find the lightcurve dependencies:
	if corrector == 'ens':
		openfile_needed = True

	# Find the starid of the TPF which was used to create this lightcurve:
	if row['datasource'] == 'tpf':
		dependency_tpf = row['starid']
	elif row['datasource'].startswith('tpf:'):
		dependency_tpf = int(row['datasource'][4:])
	else:
		dependency_tpf = None

	# Placeholder for dependencies between lightcurves:
	dependency_lc = None

	# Damn, it looks like a modification is needed:
	allow_change = []
	if openfile_needed:
		logger.debug("Opening FITS file: %s", fname)
		modification_needed = False

		if fix_wcs:
			if tpf_rootdir is None:
				raise RuntimeError("You need to provide a TPF_ROOTDIR")
			# The WCS can only be fixed for lightcurves extracted from a TPF:
			if dependency_tpf is None:
				raise RuntimeError("We can't fix WCSs of FFI targets!")
			# Find the original TPF file and extract the WCS from its headers:
			tpf_file = find_tpf_files(tpf_rootdir, starid=dependency_tpf, sector=sector, camera=camera, ccd=ccd, cadence=cadence)
			if len(tpf_file) != 1:
				raise RuntimeError(f"Could not find TPF file: starid={dependency_tpf:d}, sector={sector:d}")
			# Extract the FITS header with the correct WCS:
			with warnings.catch_warnings():
				warnings.filterwarnings('ignore', category=FITSFixedWarning)
				wcs_header = WCS(header=fits.getheader(tpf_file[0], extname='APERTURE'), relax=True).to_header(relax=True)

		atomic_copy(fname, fname_original)
		with fits.open(fname_original, mode='readonly', memmap=True) as hdu:
			prihdr = hdu[0].header

			# Check if the current DATAVAL matches what it should be:
			current_dataval = prihdr.get('DATAVAL')
			if current_dataval != dataval:
				modification_needed = True
				allow_change += ['DATAVAL']
				if current_dataval is None:
					# Insert DATAVAL keyword just before CHECKSUM:
					prihdr.insert('CHECKSUM', ('DATAVAL', dataval, 'Data validation flags'))
				else:
					prihdr['DATAVAL'] = dataval

			if corrector == 'ens' and version <= 5:
				if hdu['ENSEMBLE'].header.get('TDISP2') == 'E':
					logger.info("%s: Changing ENSEMBLE/TDISP2 header", fname)
					modification_needed = True
					allow_change += ['TDISP2']
					hdu['ENSEMBLE'].header['TDISP2'] = 'E26.17'

			if corrector == 'ens':
				# Pick out the list of TIC-IDs used to build ensemble:
				dependency_lc = list(hdu['ENSEMBLE'].data['TIC'])

			if fix_wcs:
				logger.info("%s: Changing WCS", fname)
				modification_needed = True
				allow_change += ['CRPIX1', 'CRPIX2']
				mjdref_remove = ('MJDREF' not in hdu['APERTURE'].header)
				hdu['APERTURE'].header.update(wcs_header)
				hdu['SUMIMAGE'].header.update(wcs_header)
				if mjdref_remove:
					hdu['APERTURE'].header.remove('MJDREF', ignore_missing=True, remove_all=True)
					hdu['SUMIMAGE'].header.remove('MJDREF', ignore_missing=True, remove_all=True)

			# Fix bug with multiple MJD-OBS keywords in FITS headers:
			if version <= 5: # TODO: Modify this when we know the CAUSE of this SYMPTOM
				for extname in ('APERTURE', 'SUMIMAGE'):
					if list(hdu[extname].header.keys()).count('MJD-OBS') > 1:
						logger.info("%s: Multiple MJD-OBS in %s", fname, extname)
						mjdobs = hdu[extname].header['MJD-OBS']
						indx = hdu[extname].header.index('MJD-OBS')
						hdu[extname].header.remove('MJD-OBS', remove_all=True)
						hdu[extname].header.insert(indx, ('MJD-OBS', mjdobs, '[d] MJD at start of observation'))
						allow_change += ['MJD-OBS']
						modification_needed = True

			if modification_needed:
				hdu.writeto(fname, output_verify='exception', checksum=True, overwrite=True)

	if modification_needed:
		try:
			if check_fits_changes(fname_original, fname, allow_header_value_changes=allow_change):
				os.remove(fname_original)
			else:
				logger.error("File check failed: %s", fname)
				raise RuntimeError(f"File check failed: {fname}")
		except: # noqa: E722
			logger.exception("Whoops: %s", fname)
			if os.path.isfile(fname_original) and os.path.getsize(fname_original) > 0:
				if os.path.exists(fname):
					os.remove(fname)
				os.rename(fname_original, fname)
			raise

	elif os.path.exists(fname_original):
		os.remove(fname_original)

	# Extract information from final file:
	filesize = os.path.getsize(fname)
	filehash = get_filehash(fname)

	# Check that filesize is not zero:
	if filesize == 0:
		raise RuntimeError(f"File has zero size: {fname}")

	return {
		'priority': row['priority'],
		'starid': row['starid'],
		'sector': row['sector'],
		'camera': row['camera'],
		'ccd': row['ccd'],
		'cbv_area': row['cbv_area'],
		'cadence': cadence,
		'lightcurve': row['lightcurve'],
		'dataval': dataval,
		'datarel': datarel,
		'version': version,
		'filesize': filesize,
		'filehash': filehash,
		'dependency_tpf': dependency_tpf,
		'dependency_lc': dependency_lc
	}
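# fix_file() relies on a copy / modify / verify / roll-back pattern. A
# minimal self-contained sketch of that pattern, under assumed helper names
# (`modify` rewrites the file in place, `verify` compares backup and
# result); this is an illustration, not the authors' implementation:
def safe_rewrite(fname, modify, verify):
    import os
    import shutil
    backup = fname + '.original'
    shutil.copy2(fname, backup)      # keep a pristine copy first
    try:
        modify(fname)                # rewrite in place
        if not verify(backup, fname):
            raise RuntimeError('verification failed: %s' % fname)
    except Exception:
        os.replace(backup, fname)    # roll back on any failure
        raise
    os.remove(backup)                # success: drop the backup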
Example #55
    # ----------------------------------------------------------------------
    # find one example of each extension
    examples = find_examples(WORKSPACE)

    # for each example we need to open the file and print to a string
    lines = []
    # loop around examples
    for example in np.sort(list(examples.keys())):
        # filename
        example_filename = examples[example]
        # add line to lines with which example this is
        lines.append(' - {0}'.format(example))
        lines.append('')
        lines.append('```')
        # open file
        header = fits.getheader(example_filename)
        # loop around header keys
        for key in header:
            if key.startswith(KEY_PREFIX):
                largs = [key, header[key], header.comments[key]]
                lines.append('{0:9s} {1} // {2}'.format(*largs))
        lines.append('```')
        lines.append('')
        lines.append('')

    # write to file
    with open(OUTPUTFILE, 'w') as f:
        for line in lines:
            f.write(line + '\n')
    w1, f1 = d1
    i1 = np.argmin(abs(w1 - center))
    v1 = f1[i1]

    w2, f2 = d2
    i2 = np.argmin(abs(w2 - center))
    v2 = f2[i2]

    return min([v1, v2])


if __name__ == '__main__':
    regions = [[10000, 10100], [10130, 10230], [12200, 12300]]

    lines = np.loadtxt('Felines.moog', usecols=(0, ))
    wArcturus = get_wavelength(fits.getheader('ArcturusSummer.fits'))
    fArcturus = fits.getdata('ArcturusSummer.fits')

    w10Leo1 = get_wavelength(fits.getheader('10LeoYJ.fits'))
    f10Leo1 = fits.getdata('10LeoYJ.fits')
    w10Leo2 = get_wavelength(fits.getheader('10LeoH.fits'))
    f10Leo2 = fits.getdata('10LeoH.fits')
    w10Leo3 = get_wavelength(fits.getheader('10LeoK.fits'))
    f10Leo3 = fits.getdata('10LeoK.fits')

    f10Leo1, w10Leo1 = dopplerShift(w10Leo1, f10Leo1, -82.53)
    f10Leo2, w10Leo2 = dopplerShift(w10Leo2, f10Leo2, -81.82)
    f10Leo3, w10Leo3 = dopplerShift(w10Leo3, f10Leo3, -81.37)

    for i, region in enumerate(regions):
        if i != 1:
Example #57
def get_segment_offset(segment_number, detector, library_list):
    """Convert vectors coordinates in the local segment control
    coordinate system to NIRCam detector X and Y coordinates,
    at least proportionally, in order to calculate the location
    of the segment PSFs on the given detector.

    Parameters
    ----------
    segment_number : int
        Segment ID, e.g. 3
    detector : str
        Name of NIRCam detector
    library_list : list
        List of the names of the segment PSF library files

    Returns
    -------
    x_arcsec
        The x offset of the segment PSF in arcsec
    y_arcsec
        The y offset of the segment PSF in arcsec
    """

    # Verify that the segment number in the header matches the index
    seg_index = int(segment_number) - 1
    header = fits.getheader(library_list[seg_index])

    assert int(header['SEGID']) == int(segment_number), \
        "Uh-oh. The segment ID of the library does not match the requested " \
        "segment. The library_list was not assembled correctly."
    xtilt = header['XTILT']
    ytilt = header['YTILT']
    segment = header['SEGNAME'][:2]
    sm_piston = header.get('SMPISTON', 0)

    # SM piston has, as one of its effects, adding tilt onto each segment,
    # along with higher order WFE such as defocus. We model here the effect
    # of SM piston onto the x and y offsets.
    # Coefficients determined based on WAS influence function matrix, as
    # derived from segment control geometries.
    if segment.startswith('A'):
        xtilt += sm_piston * 0.010502
    elif segment.startswith('B'):
        xtilt += sm_piston * -0.020093
    elif segment.startswith('C'):
        ytilt += sm_piston * 0.017761

    # Next we work out the individual offsets from segment-level tilts
    control_xaxis_rotations = {
        'A1': 180,
        'A2': 120,
        'A3': 60,
        'A4': 0,
        'A5': -60,
        'A6': -120,
        'B1': 0,
        'C1': 60,
        'B2': -60,
        'C2': 0,
        'B3': -120,
        'C3': -60,
        'B4': -180,
        'C4': -120,
        'B5': -240,
        'C5': -180,
        'B6': -300,
        'C6': -240
    }

    x_rot = control_xaxis_rotations[segment]  # degrees
    x_rot_rad = x_rot * np.pi / 180  # radians

    # Note that y is defined as the x component and x is defined as the y component.
    # This is because "xtilt" moves the PSF in the y direction, and vice versa.
    tilt_onto_y = (xtilt * np.cos(x_rot_rad)) - (ytilt * np.sin(x_rot_rad))
    tilt_onto_x = (xtilt * np.sin(x_rot_rad)) + (ytilt * np.cos(x_rot_rad))

    umrad_to_arcsec = 1e-6 * (180. / np.pi) * 3600
    x_arcsec = 2 * umrad_to_arcsec * tilt_onto_x
    y_arcsec = 2 * umrad_to_arcsec * tilt_onto_y

    # Secondary mirror tilts and translations also shift the apparent location of each PSF,
    # often referred to as "changing the boresight".
    # Coefficients for this are worked out by Randal Telfer in
    # "JWST Secondary Mirror Influence Functions", doc #JWST-PRES-043631
    # Values here are taken from Rev C of that document. They are given in units of NIRCam SW pixels per micro-unit of SM pose.
    # We include just the first order terms, neglecting the small higher order terms
    sm_xtilt = header.get('SMXTILT', 0)
    sm_ytilt = header.get('SMYTILT', 0)
    sm_xtrans = header.get('SMXTRANS', 0)
    sm_ytrans = header.get('SMYTRANS', 0)

    nrc_pixelscale = 0.0311  # arcsec/pixel
    x_boresight_offset = (1.27777 * sm_ytilt -
                          0.71732 * sm_xtrans) * nrc_pixelscale
    y_boresight_offset = (-1.27363 * sm_xtilt -
                          0.71571 * sm_ytrans) * nrc_pixelscale

    x_arcsec += x_boresight_offset
    y_arcsec += y_boresight_offset

    # Optionally, arbitrary boresight offset may also be present in the FITS header metadata.
    # If so, include that in the PSF too. Be careful about coordinate sign for the V2 axis!
    try:
        x_arcsec -= header[
            'BSOFF_V2'] * 60  # BS offset values in header are in arcminutes
        y_arcsec += header['BSOFF_V3'] * 60  #
    except:
        pass

    return x_arcsec, y_arcsec
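# A minimal sketch of the segment-tilt rotation used above, assuming tilts
# in microradians and the control-axis rotation in degrees (extracted here
# as a standalone helper for illustration):
def tilts_to_arcsec_offsets(xtilt, ytilt, x_rot_deg):
    th = x_rot_deg * np.pi / 180
    tilt_onto_y = xtilt * np.cos(th) - ytilt * np.sin(th)
    tilt_onto_x = xtilt * np.sin(th) + ytilt * np.cos(th)
    umrad_to_arcsec = 1e-6 * (180. / np.pi) * 3600
    return (2 * umrad_to_arcsec * tilt_onto_x,
            2 * umrad_to_arcsec * tilt_onto_y)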
Example #58
def main(path0, silent=False, verbose=True, overwrite=False):
    '''
    main() function to obtain information from FITS header and write to
    ASCII file

    Parameters
    ----------
    path0 : str
     Path to FITS file. Must include '/' at the end

    silent : boolean
      Turns off stdout messages. Default: False

    verbose : boolean
      Turns on additional stdout messages. Default: True

    Returns
    -------
    tab0 : astropy.table.Table
     Astropy ASCII table written to [path0]+'hdr_info.tbl'

    Notes
    -----
    Created by Chun Ly, 4 March 2017
     - Later re-organized to check for file first
    Modified by Chun Ly, 5 March 2017
     - File exists warning always printed out
     - Include AIRMASS
    Modified by Chun Ly, 23 March 2017
     - Call dir_check.main() to handle multiple date directories
    Modified by Chun Ly, 11 May 2017
     - Handle longer filter1 and filter2 FITS values
    Modified by Chun Ly,  8 December 2017
     - Import glog and call for stdout and ASCII logging
     - Pass mylogger to dir_check.main()
    '''

    # Moved up on 10/12/2017
    logfile = path0 + 'hdr_info.log'
    mylogger = glog.log0(logfile)._get_logger()

    if silent == False: mylogger.info('### Begin main : ' + systime())

    dir_list, list_path = dir_check.main(path0,
                                         mylogger=mylogger,
                                         silent=silent,
                                         verbose=verbose)

    # Mod on 23/03/2017
    for path in list_path:
        outfile = path + 'hdr_info.tbl'

        # Mod later on 04/03/2017 to not overwrite file
        # Mod on 05/03/2017 to always print out this warning
        if overwrite == False and exists(outfile):
            # Mod on 08/12/2017
            mylogger.warning('File exists : ' + outfile)
            mylogger.warning('Not over-writing!!! ')
        else:
            fits_files = glob.glob(path + 'N*fits')
            n_files = len(fits_files)

            # Mod on 05/03/2017 to include airmass
            names0 = ('filename', 'datelabel', 'UT_date', 'obstype', 'object',
                      'exptime', 'airmass', 'grating', 'gratwave', 'filter1',
                      'filter2', 'slit')
            dtype0 = ('S20', 'S30', 'S25', 'S8', 'S100', 'f8', 'f8', 'S15',
                      'f8', 'S20', 'S20', 'S20')
            tab0 = Table(names=names0, dtype=dtype0)

            for nn in xrange(n_files):
                basename = os.path.basename(fits_files[nn])
                if silent == False: mylogger.info('Reading : ' + basename)
                h0 = fits.getheader(fits_files[nn])
                # Mod on 05/03/2017 to include airmass
                vec0 = [
                    basename, h0['DATALAB'], h0['DATE-OBS'] + 'T' + h0['UT'],
                    h0['OBSTYPE'], h0['OBJECT'], h0['EXPTIME'], h0['AIRMASS'],
                    h0['GRATING'], h0['GRATWAVE'], h0['FILTER1'],
                    h0['FILTER2'], h0['SLIT']
                ]
                tab0.add_row(vec0)

            if silent == False:
                mylogger.info('Writing : ' + outfile)  # Mod on 08/12/2017
            asc.write(tab0, outfile, format='fixed_width_two_line')
        #endelse
    #endfor

    if silent == False: mylogger.info('### End main : ' + systime())
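# A minimal usage sketch (the path is hypothetical; the trailing '/' is
# required per the docstring):
#
#   main('/data/gnirs/2017mar04/')  # writes hdr_info.tbl and hdr_info.log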

while True:

    current_session = get_session().strftime(nightfmt)
    src_path = os.environ['DIRECTORY_TO_WATCH'] + current_session
    copy_path = os.environ['FEROS_DATA_PATH'] + current_session + '/RAW/'

    if not os.path.exists(copy_path):
        os.makedirs(copy_path)

    allimages = glob.glob(src_path + '/FEROS*.fits')

    for image in allimages:

        hdr = pf.getheader(image)
        imagename = image.split('/')[-1]

        if normal_obsmode['HIERARCH ESO DET READ CLOCK'] == hdr['HIERARCH ESO DET READ CLOCK'] and \
            normal_obsmode['CDELT1'] == hdr['CDELT1'] and \
            normal_obsmode['CDELT2'] == hdr['CDELT2'] and \
            hdr['HIERARCH ESO TPL NAME'] in [HIERARCH_ESO_TPL_NAME['bias'],HIERARCH_ESO_TPL_NAME['flat'],HIERARCH_ESO_TPL_NAME['lamp']]:

            cmd = 'rsync -avz %s %s' % (image, copy_path + imagename)
            status = subprocess.call(cmd, shell=True)

            print('---------------------------------------------------------------')
            print('Copied files are:')
            print(image)
            print(copy_path + image.split('/')[-1])
            print('---------------------------------------------------------------')
Example #60
def batss_pointing_detect(obs_id, #should be BATSS_slew object?
    ra, dec,    # Source RA/Dec
    eband_name, # Source energy band
    err_rad):   # Source error radius (arcmin, 90%)
    '''
    Run imaging and detection for BAT pointings before and after a given slew,
    for given sky coordinates and energy band
    '''

    #Check input parameters
    obs = []    # Initialize observation list
    if isinstance(obs_id, list):
        for obs_id0 in obs_id:
            obs.append(BATSS_slew(obs_id0))
    else:
        obs.append(BATSS_slew(obs_id))
    pos = SkyCoord(ra, dec, unit='deg')
    coord_str = ('J'+pos.ra.to_string(unit='hour',pad=True,sep='',fields=2)
        +(10*pos.dec).to_string(pad=True,sep='',fields=1,alwayssign=True))
    coord_str_tex = coord_str[:5]+'$'+coord_str[5]+'$'+coord_str[6:] # TeX
    eband = BATSS_eband(eband_name)
    err_rad = err_rad * u.arcmin
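    # BATSS_slew, BATSS_eband and BATSS_dir are project helpers assumed to be
    # imported above; judging from their use below, the eband object exposes
    # .name, .str and .str_keV, and BATSS_slew objects carry .id, .type and
    # the various *file attributes.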

    # Input/Output directories
    root = BATSS_dir.root
    dataroot = './data/'
    if not os.path.exists(dataroot):
        os.makedirs(dataroot)

    # Loop over BATSS observations
    for obs0 in obs:
        t0 = datetime.now()
        ##Time object with slew date
        #obs_date = Time('20'+obs0.id[:2]+'-'+obs0.id[2:4]+'-'+obs0.id[4:6])
        obs_date = datetime(int('20'+obs0.id[:2]), int(obs0.id[2:4]), int(obs0.id[4:6]))

        print(f'{70*"="} {datetime.now():%c}')
        print('BATSS Observation type and ID: ', obs0.type.upper(), obs0.id)
        print('Coordinates to search (J2000): ',pos.to_string('hmsdms'))
        print('Energy band: '+eband.name+' ('+eband.str_keV+')')

        # Output directories
        datadir = dataroot+obs0.type+'_'+obs0.id+'_'+coord_str+'_'+eband.name+'/'
        if not os.path.exists(datadir):
            os.makedirs(datadir)
        tempdir = datadir+'temp/'
        if not os.path.exists(tempdir):
            os.makedirs(tempdir)
        # Initialize output txt file
        txtfile = datadir+obs0.type+'_'+obs0.id+'_'+coord_str+'_'+eband.name+'.txt'
        f = open(txtfile, 'w')
        f.write(f'{70*"="} {datetime.now():%c}\n')
        f.write('BATSS Observation type and ID: '+obs0.type.upper()+' '+obs0.id+'\n')
        f.write('Coordinates to search (J2000): '+pos.to_string('hmsdms')+'\n')
        f.write('Energy band: '+eband.name+' ('+eband.str_keV+')'+'\n')
        f.close()

        #Input catalog file
        catfile_in = tempdir+'batss.in.cat'
        #  CATNUM: Source number within catalog
        #  NAME:   Source name
        #  RA_CAT/GLON_CAT: Catalogued source longitude
        #  DEC_CAT/GLAT_CAT: Catalogued source latitude
        #  RA_OBJ/GLON_OBJ: Source longitude (to be modified upon detection)
        #  DEC_OBJ/GLAT_OBJ: Source latitude  (to be modified upon detection)
        #  ERR_RAD_BATSS: BATSS error radius (90%, deg)
        cat_in_Table = Table(
            {'CATNUM':[0],
            'NAME':['BATSS_'+coord_str],
            'RA_OBJ':[pos.ra.value] * pos.ra.unit,
            'DEC_OBJ':[pos.dec.value] * pos.dec.unit,
            'RA_CAT':[pos.ra.value] * pos.ra.unit,
            'DEC_CAT':[pos.dec.value] * pos.dec.unit,
            'ERR_RAD_BATSS':[err_rad.to_value(u.deg)] * u.deg},
            names=('CATNUM','NAME','RA_OBJ','DEC_OBJ','RA_CAT','DEC_CAT',
                'ERR_RAD_BATSS')) #Specifies column order
        cat_in = fits.BinTableHDU(cat_in_Table, name='BAT_CATALOG')
        cat_in.header.set('HDUNAME', 'BAT_CATALOG', 'Name of extension',
            before='TTYPE1') #Necessary?
        cat_in.header.set('HDUCLASS', 'CATALOG', 'Source catalog',
            before='TTYPE1')
        cat_in.header.comments['TTYPE1'] = 'Source number within catalog'
        cat_in.header.set('TNULL1', -1, 'data null value', after='TFORM1')
        cat_in.header.comments['TTYPE2'] = 'Source name'
        cat_in.header.comments['TTYPE3'] = 'Detected source longitude'
        cat_in.header.comments['TUNIT3'] = 'physical unit of field'
        cat_in.header.set('TDISP3', 'F10.4', 'column display format',
            after='TUNIT3')
        cat_in.header.comments['TTYPE4'] = 'Detected source latitude'
        cat_in.header.comments['TUNIT4'] = 'physical unit of field'
        cat_in.header.set('TDISP4', 'F10.4', 'column display format',
            after='TUNIT4')
        cat_in.header.comments['TTYPE5'] = 'Catalogued source longitude'
        cat_in.header.comments['TUNIT5'] = 'physical unit of field'
        cat_in.header.set('TDISP5', 'F10.4', 'column display format',
            after='TUNIT5')
        cat_in.header.comments['TTYPE6'] = 'Catalogued source latitude'
        cat_in.header.comments['TUNIT6'] = 'physical unit of field'
        cat_in.header.set('TDISP6', 'F10.4', 'column display format',
            after='TUNIT6')
        cat_in.header.comments['TTYPE7'] = 'BATSS error radius (90%, deg)'
        cat_in.header.comments['TUNIT7'] = 'physical unit of field'
        cat_in.header.set('TDISP7', 'F6.4', 'column display format',
            after='TUNIT7')
        cat_in.writeto(catfile_in, overwrite=True)
        # Get master FITS header for slew (archival by default)
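        # Fallback order, as implemented below: archival master FITS ->
        # archival queue file -> real-time master FITS -> real-time queue
        # file; only if all four are missing is an IOError raised.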
        flag_realtime = False
        if os.path.exists(obs0.fitsfile):
            hdrfile = obs0.fitsfile
            hdrext = 0
        else:
            print('Warning: No archival master FITS file found for'
                f' {obs0.type} {obs0.id}. Getting header info from queue file.')
            if os.path.exists(obs0.queuefile):
                hdrfile = obs0.queuefile
                hdrext = obs0.type+'_'+obs0.id
            else:
                print('Warning: No archival queue file found for'
                    f' {obs0.type} {obs0.id}. Getting header info from'
                    ' real-time data.')
                flag_realtime = True
                if os.path.exists(obs0.fitsfile_realtime):
                    hdrfile = obs0.fitsfile_realtime
                    hdrext = 0
                else:
                    print('Warning: No real-time master FITS file found for'
                        f' {obs0.type} {obs0.id}. Getting header info from'
                        f' queue file.')
                    if os.path.exists(obs0.queuefile_realtime):
                        hdrfile = obs0.queuefile_realtime
                        hdrext = obs0.type+'_'+obs0.id
                    else:
                        raise IOError('Neither archival nor real-time files'
                            f' found for {obs0.type} {obs0.id}')
        #fitsfile = obs0.fitsfile_realtime if flag_realtime else obs0.fitsfile
        print('Header file: ' + hdrfile)
        print('Extension:', hdrext)
        # Let read errors propagate: a swallowed exception here would leave
        # `header` undefined below.
        header = fits.getheader(hdrfile, hdrext)
        # Partial coding map
        pcfile = obs0.pcfile_realtime if flag_realtime else obs0.pcfile
        try:
            if not os.path.exists(pcfile):
                # Try getting default partial coding map
                print('Warning: Partial coding file ('
                    +('realtime' if flag_realtime else 'archival')
                    +') does not exist. Reading from default file.')
                pcfile = BAT_pcfile_def()
            print('Partial coding map file: '+pcfile)
            pcmap, pchdr = fits.getdata(pcfile, header=True)
        except IOError:
            # Propagate read errors; swallowing them here would leave
            # pcmap/pchdr undefined below.
            raise
        else:
            dims_pcmap = np.shape(pcmap)
        # Attitude file
        attfile = obs0.attfile_realtime if flag_realtime else obs0.attfile
        try:
            if not os.path.exists(attfile):
                raise IOError('Attitude file ('+('realtime' if flag_realtime else 'archival')+') does not exist')
            att = fits.getdata(attfile, 1)
        except IOError:
            raise
        else:
            flag_settled = 192 # FLAGS value (0b11000000) for a settled spacecraft

        # Get time windows for preceding and following pointings
        obs_t0 = header['BEG_SLEW'] #[MET]
        gti_pre = {'start':0, 'stop':header['BEG_SLEW']} #[MET]
        gti_pre_sod = {'start':0,
            'stop':(int(obs0.id[7:9])*3600 + int(obs0.id[10:12])*60
                + int(obs0.id[13:15]))} #[SOD]
        gti_pos = {'start':header['END_SLEW'], 'stop':0} #[MET]
        gti_pos_sod = {'start':gti_pre_sod['stop'] + int(obs0.id[17:20]),
            'stop':0} #[SOD]
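        # The slicing above assumes the slew ID encodes the slew start time
        # (hours at id[7:9], minutes at id[10:12], seconds at id[13:15]) and
        # the slew duration in seconds at id[17:20]; SOD = seconds of day.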

        queuefile = obs0.queuefile_realtime if flag_realtime else obs0.queuefile
        try:
            with fits.open(queuefile) as queue_hdul:
                w = np.array([hdu.name == 'SLEW_'+obs0.id for hdu in queue_hdul]).nonzero()[0]
            assert len(w) == 1
        except IOError:
            raise
        else:
            w = w[0]

        # Beginning of preceding pointing
        if w == 1:
            # Get slew from previous day
            date_pre = obs_date - timedelta(days=1)
            queuefile_pre = (root
                + f'products/{date_pre.year:04}_{date_pre.month:02}/'
                f'queue{"_realtime" if flag_realtime else ""}/'
                f'queue_{date_pre.year % 100:02}{date_pre.month:02}'
                f'{date_pre.day:02}_{obs0.type}.fits')
            try:
                with fits.open(queuefile_pre) as queue_pre_hdul:
                    wpre = len(queue_pre_hdul) - 1 # last slew extension of previous day
                    gti_pre_sod['start'] = -86400
            except OSError:
                print('File not found: '+queuefile_pre)
                raise
        else:
            queuefile_pre = queuefile
            queue_pre_hdul = queue_hdul
            wpre = w-1
        slew_id_pre = queue_pre_hdul[wpre].name[5:]
        gti_pre['start'] = fits.getval(queuefile_pre, 'END_SLEW', ext=wpre)
        gti_pre_sod['start'] += (int(slew_id_pre[7:9])*3600
            + int(slew_id_pre[10:12])*60 + int(slew_id_pre[13:15])
            + int(slew_id_pre[17:20]))
        # End of following pointing
        if w == len(queue_hdul) - 1: # last slew extension of the day
            # Get slew from following day
            date_pos = obs_date + timedelta(days=1)
            queuefile_pos = (root
                + f'products/{date_pos.year:04}_{date_pos.month:02}/'
                f'queue{"_realtime" if flag_realtime else ""}/'
                f'queue_{date_pos.year % 100:02}{date_pos.month:02}'
                f'{date_pos.day:02}_{obs0.type}.fits')
            try:
                with fits.open(queuefile_pos) as queue_pos_hdul:
                    wpos = 1 # first slew extension of following day
                    gti_pos_sod['stop'] = 86400
            except OSError:
                print('File not found: '+queuefile_pos)
                raise
        else:
            queuefile_pos = queuefile
            queue_pos_hdul = queue_hdul
            wpos = w+1
        slew_id_pos = queue_pos_hdul[wpos].name[5:]
        gti_pos['stop'] = fits.getval(queuefile_pos, 'BEG_SLEW', ext=wpos)
        gti_pos_sod['stop'] += (int(slew_id_pos[7:9])*3600 +
            int(slew_id_pos[10:12])*60 + int(slew_id_pos[13:15]))

        # Read AFST files for previous, current and following days
        afst_obs_id = []
        afst_yymmdd = []
        afst_start_sod = []
        afst_stop_sod = []
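        # The AFST (as-flown science timeline) pages are assumed to hold one
        # <tr> per pointing, with <td> cells ordered as start time, stop time,
        # observation ID (link) and segment number (link); this is inferred
        # from the parsing below.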
        for d in [-1,0,1]:
            date0 = obs_date + timedelta(days=d)
            yymmdd = f'{date0.year % 100:02}{date0.month:02}{date0.day:02}'
            afstfile = (root + f'products/{date0.year:04}_{date0.month:02}/'
                f'afst/afst_{date0.year % 100:02}{date0.month:02}'
                f'{date0.day:02}.html')
            try:
                with open(afstfile,'r') as f0:
                    afst_soup = BeautifulSoup(f0, features='lxml')
            except OSError:
                raise
            tr = afst_soup.find_all('tr')
            for tr0 in tr:
                try:
                    afst_class = tr0['class'][0]
                except KeyError:
                    continue
                if afst_class == 'header':
                    continue
                td0 = tr0.find_all('td')
                start0 = td0[0].get_text(strip=True)
                start_sod0 = ((datetime(int(start0[:4]), int(start0[5:7]),
                    int(start0[8:10])) - obs_date).days*86400
                    + int(start0[11:13])*3600 + int(start0[14:16])*60
                    + int(start0[17:19]))
                stop0 = td0[1].get_text(strip=True)
                stop_sod0 = ((datetime(int(stop0[:4]), int(stop0[5:7]),
                    int(stop0[8:10])) - obs_date).days*86400
                    + int(stop0[11:13])*3600 + int(stop0[14:16])*60
                    + int(stop0[17:19]))
                afst_obs_id.append(td0[2].a.text.zfill(8) + td0[3].a.text.zfill(3))
                afst_yymmdd.append(yymmdd)
                afst_start_sod.append(start_sod0)
                afst_stop_sod.append(stop_sod0)
        point = Table({'obs_id':afst_obs_id, 'yymmdd':afst_yymmdd,
            'start_sod':afst_start_sod, 'stop_sod':afst_stop_sod})
        del afst_obs_id, afst_yymmdd, afst_start_sod, afst_stop_sod

        # Get Observation IDs for preceding and following pointings
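        # Overlap via clipping: min(stop, gti_stop) - max(start, gti_start)
        # is positive iff a pointing intersects the window; e.g. a pointing
        # spanning SOD [100, 500] against a window [0, 300] overlaps by
        # 300 - 100 = 200 s.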
        dt_pre = (point['stop_sod'].clip(max=gti_pre_sod['stop'])
            - point['start_sod'].clip(min=gti_pre_sod['start']))
        upre = np.argmax(dt_pre)
        dt_pre = dt_pre[upre]
        assert dt_pre > 0
        obs_id_pre = point[upre]['obs_id']
        yymmdd_pre = point[upre]['yymmdd']
        dt_pos = (point['stop_sod'].clip(max=gti_pos_sod['stop'])
            - point['start_sod'].clip(min=gti_pos_sod['start']))
        upos = np.argmax(dt_pos)
        dt_pos = dt_pos[upos]
        assert dt_pos > 0
        obs_id_pos = point[upos]['obs_id']
        yymmdd_pos = point[upos]['yymmdd']
        del point

        # Save GTI files for preceding and following pointings
        gtifile_pre = tempdir+obs0.type+'_'+obs0.id+'_pre.gti'
        gti_pre_Table = Table({'START':[gti_pre['start']] * u.s,
            'STOP':[gti_pre['stop']] * u.s}, names=('START','STOP'))
        gtihdr_pre = BATSS_gtihdr(gti_pre_Table)
        hdu_pre = fits.BinTableHDU(gti_pre_Table, header=gtihdr_pre)
        hdu_pre.writeto(gtifile_pre, overwrite=True)
        gtifile_pos = tempdir+obs0.type+'_'+obs0.id+'_pos.gti'
        gti_pos_Table = Table({'START':[gti_pos['start']] * u.s,
            'STOP':[gti_pos['stop']] * u.s}, names=('START','STOP'))
        gtihdr_pos = BATSS_gtihdr(gti_pos_Table)
        hdu_pos = fits.BinTableHDU(gti_pos_Table, header=gtihdr_pos)
        hdu_pos.writeto(gtifile_pos, overwrite=True)

        # Perform BATSURVEY analysis on preceding and following pointings
        obs0.src_name = 'BATSS '+coord_str # Include BATSS source name
        obs0.src_name_tex = 'BATSS '+coord_str_tex # TeX formatted
        obs0.eband = eband
        for flag_pre in [True, False]:
            print(f'{70*"="} {datetime.now():%c}')
            f = open(txtfile, 'a')
            if flag_pre:
                print('PRECEDING POINTING. ',end='')
                f.write(f'\n{95*"="}\nPRECEDING POINTING. ')
                prefix = 'pre'
                gtifile = gtifile_pre
                obs_id = obs_id_pre
                yymmdd_point = yymmdd_pre
            else:
                print('FOLLOWING POINTING. ',end='')
                f.write(f'\n{95*"="}\nFOLLOWING POINTING. ')
                prefix = 'pos'
                gtifile = gtifile_pos
                obs_id = obs_id_pos
                yymmdd_point = yymmdd_pos
            yyyy_mm_point = '20'+yymmdd_point[:2]+'_'+yymmdd_point[2:4]
            print(f'Observation ID: {obs_id}')
            f.write(f'Observation ID: {obs_id}\n')
            # Get coding fraction of source from attitude data
            gti = fits.getdata(gtifile,1)
            w = ((att['time'] >= gti['start'])
                & (att['time'] <= gti['stop'])).nonzero()[0]
            assert len(w) > 0
            #print(f'Attitude records found within GTI: {len(w)}')
            w0 = (att[w]['flags'] == flag_settled).nonzero()[0]
            assert len(w0) > 0
            w = w[w0]
            #print(f'Settled records: {len(w)}')
            w0 = (att[w]['obs_id'] == obs_id).nonzero()[0]
            if len(w0) == 0:
                str_out = ('WARNING: No settled attitude records found for'
                    f' Observation {obs_id}')
                print(str_out)
                f.write('\t'+str_out+'\n')
                obs_id0, obs_id0_pos = np.unique(att[w]['obs_id'],
                    return_inverse=True)
                obs_id0_cts = np.bincount(obs_id0_pos)
                imax = obs_id0_cts.argmax()
                str_out = (f'\tUsing most frequent Obs ID: {obs_id0[imax]}'
                    f' ({obs_id0_cts[imax]} records)')
                print(str_out)
                f.write(str_out+'\n')
                obs_id = obs_id0[imax]
                w0 = (obs_id0_pos == imax).nonzero()[0]
                assert len(w0) > 0
                del obs_id0, obs_id0_pos, obs_id0_cts, imax
            w = w[w0]
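            # Use the attitude record at the midpoint of the settled interval
            # as the representative spacecraft pointing (RA, Dec, roll).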
            w0 = w[len(w)//2]
            ra0 = att[w0]['pointing'][0]
            dec0 = att[w0]['pointing'][1]
            roll0 = att[w0]['pointing'][2]
            # Modify pchdr astrometry
            pchdr = BAT_astrmod(pchdr, ra=ra0, dec=dec0, roll=roll0)
            #fits.PrimaryHDU(pcmap, pchdr).writeto(datadir+'test_pchdr_'
            #    +prefix+'.fits', overwrite=True) #TEMP
            pcwcs = wcs.WCS(pchdr)
            pix = pcwcs.all_world2pix([[pos.ra.deg, pos.dec.deg]],
                1)[0].round().astype(int)[::-1] # For [y,x] indexing!
            pix = pix.clip(1, dims_pcmap) - 1
            pcodefr0 = 100 * pcmap[pix[0], pix[1]]
            str_out = f'Source coding fraction: {pcodefr0:6.2f}%. '
            print(str_out, end='')
            f.write(str_out)
            if pcodefr0 == 0:
                str_out = 'Pointing skipped'
                print(str_out)
                f.write(str_out+'\n')
                f.close()  # close before skipping; it is reopened each iteration
                if flag_pre:
                    obs0.cat_pre = []
                else:
                    obs0.cat_pos = []
                continue
            print('Downloading pointing data... ', end='')
            t1 = datetime.now()
            obsdir = datadir+prefix+'_'+obs0.type+'_'+obs0.id+'/'
            command = ['wget'   # basic command
                ' -q'           # turn off output
                ' -r -l0'       # recursive retrieval (max depth 0)
                ' -nH'          # no host-prefixed directories
                ' --cut-dirs=7' # also ignore 7 directories
                ' -np'          # do not ascend to parent directory
                f' --directory-prefix={obsdir}' # top directory for output
                ' --no-check-certificate' # don't check server certificate
                ' -c'           # continue partial downloading
                ' -N'           # use same timestamping as remote file
                " -R'index*'"   # reject all 'index*' files
                ' -erobots=off' # turn off Robots Exclusion Standard
                ' --retr-symlinks' # download symbolic links
                ' http://heasarc.gsfc.nasa.gov/FTP/swift/data/obs/'
                f'{yyyy_mm_point}//{obs_id}/'+s for s in ['bat/','auxil/']]
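            # Note: the adjacent string literals above concatenate into one
            # long command line (one per remote subdirectory, 'bat/' and
            # 'auxil/'); split(' ') then tokenizes it for subprocess without
            # invoking a shell.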
            for command0 in command:
                subp.run(command0.split(' '))
            str_out = f'({(datetime.now()-t1).seconds}s)'
            print('done '+str_out)
            f.write(f'Pointing data downloaded {str_out}\n')
            f.close()

            # Loop over DPH and SNAPSHOT imaging
            datadir_in = obsdir
            cat_tex = []
            for flag_dph in [False, True]:
                gti_ntries = 0
                while gti_ntries < 2:
                    gti_ntries += 1
                    print(f'{70*"-"} {datetime.now():%c}')
                    print(('DPH' if flag_dph else 'SNAPSHOT')+' loop:')
                    print(f'  GTI loop {gti_ntries}: '+
                        ('Standard filtering' if gti_ntries == 1
                            else 'USERGTI filtering only'))
                    datadir_out = (obsdir+'results_'
                        +eband.name+('_dph' if flag_dph else '')+'/')
                    # BATSURVEY command
                    command = ['batsurvey',
                        datadir_in, datadir_out,
                        'energybins='+eband.str,
                        'elimits='+eband.str,
                        'incatalog='+catfile_in,
                        'ncleaniter=2', #Always clean DPH
                        # Apply DPH keyword
                        'timesep='+('DPH' if flag_dph else 'SNAPSHOT'),
                        'filtnames='+('all' if gti_ntries == 1
                            else ('global,pointing,filter_file,startracker,'
                                'st_lossfcn,data_flags,earthconstraints,'
                                'remove_midnight,occultation,usergti')),
                        'gtifile='+gtifile,
                        # Minimum exposure threshold
                        'expothresh=150.0']
                    print(' '.join(command))
                    subp.run(command)
                    # Find if master GTI file was created
                    gtifile_out = glob.glob(datadir_out+'gti/master.gti')
                    if len(gtifile_out) > 0:
                        if gti_ntries == 1:
                            gti_text = 'Standard'
                        elif gti_ntries == 2:
                            gti_text = 'Standard failed. USERGTI only'
                        break
                    else:
                        if gti_ntries == 1:
                            print('Standard GTI filtering failed. ', end='')
                            if flag_dph:
                                print('DPH binning does not work with '
                                    'USERGTI. Aborting')
                                gti_text = ('Standard failed. DPH binning '
                                    'does not work with USERGTI filtering')
                                break
                            else:
                                print('Trying USERGTI only')
                        elif gti_ntries == 2:
                            print('Standard GTI and USERGTI filtering failed.'
                                ' Aborting')
                            gti_text = 'Standard and USERGTI failed'
                # Get output catalogs
                cat_out = []
                catfile_out = glob.glob(datadir_out+'point_*/point_*_2.cat')
                catfile = (datadir+prefix+'_'+obs0.type+'_'+obs0.id
                    +'_'+coord_str+'_'+eband.name
                    +('_dph' if flag_dph else '')+'.cat')
                if len(catfile_out) > 0:
                    print(('DPH' if flag_dph else 'SNAPSHOT')
                        +' catalogs found:', len(catfile_out))
                else:
                    print('Warning: No '+('DPH' if flag_dph else 'SNAPSHOT')
                        +' catalogs found. Skipping')
                for catfile_out0 in catfile_out:
                    print('Catalog file: '+catfile_out0)
                    t_ss = os.path.basename(catfile_out0).split('_')[1]
                    print(f' {t_ss[:4]}-{t_ss[4:7]}-{t_ss[7:9]}'
                        f':{t_ss[9:11]}...', end='')
                    cat0, hdr0 = fits.getdata(catfile_out0, 1, header=True)
                    cat0_name = cat0['name'].strip()
                    #cat0['name'] = cat0['name'].strip()
                    #cat0['rate'] /= 0.16 #[cts/cm2/sec]
                    #cat0['cent_rate'] /= 0.16
                    #cat0['rate_err'] /= 0.16
                    #cat0['bkg_var'] /= 0.16
                    w = (cat0_name == 'BATSS_'+coord_str).nonzero()[0]
                    if len(w) > 0:
                        cat0 = Table(cat0)
                        for w0 in w:
                            if len(cat_out) == 0:
                                cat0[w0]['CATNUM'] = 1
                                cat_out = Table(cat0[w0])
                                hdr_out = hdr0
                                hdr_out.remove('HISTORY', ignore_missing=True,
                                    remove_all=True)
                                hdr_out['EXTNAME'] = 'BATSURVEY_CATALOG'
                                hdr_out['HDUNAME'] = 'BATSURVEY_CATALOG'
                                # Index for new sources in catalog
                                hdr_out['NEWSRCIN'] = 2
                            else:
                                cat0[w0]['CATNUM'] = hdr_out['NEWSRCIN']
                                hdr_out['NEWSRCIN'] += 1
                                cat_out.add_row(cat0[w0])
                # Save catalog file
                n_det = len(cat_out)
                with open(txtfile,'a') as f:
                    f.write(f'\n{"DPH" if flag_dph else "SNAPSHOT"}'
                        ' processing:\n')
                    f.write(f'GTI filtering: {gti_text}\n')
                    f.write(f'Detections: {n_det if n_det > 0 else "NONE"}\n')
                    if n_det > 0:
                        fits.BinTableHDU(cat_out, hdr_out).writeto(catfile,
                            overwrite=True)
                        print(f'Saved {n_det} detection(s) of'
                            f' BATSS_{coord_str} to file {catfile}')
                        f.write('   '.join([' #',
                            f'{"Time_start":23s}', f'{"Time_stop":23s}',
                            f'{"Exp[s]":7s}', f'{"CF[%]":6s}',
                            'S/N(pix)','S/N(fit)'])+'\n')
                        for cat0 in cat_out:
                            f.write('   '.join([f'{cat0["CATNUM"]:2}',
                                met2Time(cat0['TIME']).iso,
                                met2Time(cat0['TIME_STOP']).iso,
                                f'{cat0["EXPOSURE"]:7.1f}',
                                f'{100*cat0["PCODEFR"]:6.2f}',
                                f'{cat0["CENT_SNR"]:8.2f}',
                                f'{cat0["SNR"]:8.2f}'])
                                +'\n')
                            cat_tex.append({
                                'dt':cat0['TIME']-obs_t0,
                                'exp':cat0['EXPOSURE'],
                                'cf':100*cat0['PCODEFR'],
                                'cent_snr':cat0['CENT_SNR'],
                                'snr':cat0['SNR']
                                })
            if flag_pre:
                obs0.cat_pre = cat_tex
            else:
                obs0.cat_pos = cat_tex
        str_out = ('\nDONE. Processing time: '
            +str(datetime.now()-t0).split('.')[0])
        print(str_out)
        with open(txtfile, 'a') as f:
            f.write(str_out+'\n')
        print('Closed output text file: ', f.name)
    return obs