Example #1
def flatten_spectra(input_file, output_file, error=None):
    """Divide a 2D spectrum by a smooth fit along the dispersion axis and
    return the per-row mean of the smooth model (``error`` is currently unused)."""

    # Fit the large-scale shape of each row with a low-order Legendre polynomial.
    iraf.fit1d(input_file, 'tmp.fits', type='fit', interact=False,
               function='legendre', order=2)
    iraf.imarith(input_file,'/','tmp.fits','tmp2.fits')

    hdu = pyfits.open('tmp2.fits')[0]
    header = hdu.header
    data = hdu.data
    # Tile the image three times along the dispersion axis so the spline fit
    # below is not biased by the edges.
    t = np.r_[data.T, data.T, data.T]
    pyfits.PrimaryHDU(t.T,header).writeto('tmp3.fits')
    
    iraf.fit1d('tmp3.fits', 'tmp4.fits', type='fit', interact=False,
               function='spline3', order=100, low_rej=2., high_rej=2.)
    hdu2 = pyfits.open('tmp4.fits')[0]
    data2 = hdu2.data
    nx = data2.shape[1]
    pyfits.PrimaryHDU(data2[:, nx // 3:2 * nx // 3],
                      header).writeto('tmp5.fits')
    iraf.imarith('tmp5.fits','*','tmp.fits','tmp6.fits')
    d6 = pyfits.open('tmp6.fits')[0].data
    # d7 = d6/np.mean(d6,axis=1)[:,np.newaxis]
    # pyfits.PrimaryHDU(d7,header).writeto('tmp7.fits')
    iraf.imarith(input_file,'/','tmp6.fits',output_file)
#    os.system('rm tmp*.fits')
    
    return np.mean(d6,axis=1)[:,np.newaxis]
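
A hedged usage sketch for the routine above: the imports and file names are assumptions, and the IRAF packages that provide fit1d and imarith must already be loaded in the PyRAF session.

# Hypothetical driver for flatten_spectra(); file names are placeholders.
import numpy as np
import pyfits                     # or: from astropy.io import fits as pyfits
from pyraf import iraf            # fit1d/imarith must be available in the session

mean_level = flatten_spectra('science_2d.fits', 'science_2d_flat.fits')
print(mean_level.shape)           # column vector: one mean flat level per row
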
Example #2
def background(fs=None):
    iraf.cd('work')
    # Get rectified science images
    if fs is None:
        fs = glob('nrm/sci*nrm*.fits')
    if len(fs) == 0:
        print "WARNING: No rectified images for background-subtraction."
        iraf.cd('..')
        return

    if not os.path.exists('bkg'):
        os.mkdir('bkg')

    for f in fs:
        print("Subtracting background for %s" % f)
        # Make sure dispaxis is set correctly
        pyfits.setval(f, 'DISPAXIS', value=1)

        # the outfile name is very similar, just change folder prefix and
        # 3-char stage substring
        outfile = f.replace('nrm','bkg')
        # We are going to use fit1d instead of the background task
        # Go look at the code for the background task: it is literally a
        # wrapper around fit1d, but it removes the BPM option. Annoying.
        iraf.unlearn(iraf.fit1d)
        iraf.fit1d(input=f + '[SCI]', output='tmpbkg.fits', bpm=f + '[BPM]',
                   type='difference', sample='52:949', axis=2,
                   interactive='no', naverage='1', function='legendre',
                   order=5, low_reject=1.0, high_reject=1.0, niterate=5,
                   grow=0.0, mode='hl')

        # Copy the background subtracted frame into the rectified image
        # structure.
        # Save the sky spectrum as extension 3
        hdutmp = pyfits.open('tmpbkg.fits')
        hdu = pyfits.open(f)
        skydata = hdu[1].data - hdutmp[0].data
        hdu[1].data[:, :] = hdutmp[0].data[:, :]

        hdu.append(pyfits.ImageHDU(skydata))
        hdu[3].header['EXTNAME'] = 'SKY'
        hdu[3].data[hdu['BPM'].data == 1] = 0.0

        # Add back in the median sky level for things like apall and lacosmicx
        hdu[1].data[:, :] += np.median(skydata)
        hdu[1].data[hdu['BPM'].data == 1] = 0.0
        hdutmp.close()
        hdu.writeto(outfile, clobber=True)  # saving the updated file
        # (data changed)
        os.remove('tmpbkg.fits')
    iraf.cd('..')
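
A hedged driver sketch for background(): the directory layout (work/nrm, work/bkg) and the file pattern come from the function itself; the frame name below is a placeholder.

# Hypothetical usage; assumes the IRAF/PyRAF environment used by the pipeline.
background()                                   # process every work/nrm/sci*nrm*.fits frame
background(fs=['nrm/sci_example_nrm.fits'])    # or pass explicit paths relative to work/
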
Example #3
def reduce2d(self, obj, arc, do_error=None):
    images = obj.name
    do_error = do_error if do_error is not None else self.do_error
    # Rewrite the science plane as a simple FITS file and put the variance
    # plane into its own file.
    hduorig = fits.open(images)
    os.remove(images)
    hduimg = fits.PrimaryHDU(data=hduorig[0].data, header=hduorig[0].header)
    hduimg.writeto(images)
    hduvarimg = fits.PrimaryHDU(data=hduorig[1].data, header=hduorig[0].header)
    rt.rmexist([obj.var])
    hduvarimg.writeto(obj.var)
    rt.prepare_image(images, do_error=do_error)
    self.rssidentify(arc)
    nxpix = obj.header['NAXIS1']
    rt.loadparam(self.rssconfig, ['iraf.transform', 'iraf.fit1d'])
    config = self.rssconfig
    if self.do_error:
        iraf.errorpars.errtype = 'uniform'
        iraf.errorpars.resample = 'no'
        iraf.samplepars.axis = 1
        iraf.samplepars.setParam('naverage', config['iraf.fit1d']['naverage'])
        iraf.samplepars.setParam('low_reject', config['iraf.fit1d']['low_reject'])
        iraf.samplepars.setParam('high_reject', config['iraf.fit1d']['high_reject'])
        iraf.samplepars.setParam('niterate', config['iraf.fit1d']['niterate'])
        iraf.samplepars.setParam('grow', config['iraf.fit1d']['grow'])
    rt.rmexist([obj.wave, obj.bkg, obj.wave_var, obj.bkg_var])
    # Wavelength calibration
    iraf.transform(obj.noext, obj.wave, fitnames=arc.noext, logfiles=self.logfile)
    # Fit the sky along the spatial axis and subtract it
    iraf.fit1d(obj.wave, obj.sky, "fit", axis=2)
    iraf.imarith(obj.wave, "-", obj.sky, obj.bkg)
    # Calibrated error file
    if do_error:
        iraf.transform(obj.var, obj.wave_var, fitnames=arc.noext, logfiles=self.logfile)
        iraf.gfit1d(obj.wave, 'tmptab.tab', function=config['iraf.fit1d']['function'],
                    order=config['iraf.fit1d']['order'], xmin=1, xmax=nxpix,
                    interactive='no')
        iraf.tabpar('tmptab.tab', "rms", 1, Stdout=self.logfile)
        rmsval = float(iraf.tabpar.value)
        tmpval = rmsval * rmsval
        print('RMS of the fit (for VAR plane propagation) = %s' % rmsval)
        # Propagate the squared RMS into the background-subtracted variance plane
        iraf.imexpr("a+b", obj.bkg_var, a=obj.wave_var, b=tmpval)
        print("**** Done with image %s ****" % images)
Example #4
def normalize_and_merge(reduced_science_files):
    current_directory = os.getcwd()

    for k in range(len(reduced_science_files)):

        # Clean up any intermediate files left over from a previous run.
        dummies = (["norm.dummyI.fits", "norm.dummyIa.fits", "norm.dummyII.fits",
                    "norm.dummyIIa.fits", "norm.dummyIII.fits", "norm.dummyIV.fits",
                    "norm.dummyV.fits"]
                   + ["norm.dummy%d.fits" % n for n in range(1, 9)])
        for dummy in dummies:
            remove_file(current_directory, dummy)

        ranges = np.loadtxt("ranges.lis", delimiter=",")
        aperturlist = open("test.norm.apertures.lis", "w")

        merge = reduced_science_files[k].replace(".extracted.fits",
                                                 ".norm.fits")

        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummyI.fits"),
                   apertures="5:14",
                   format="multispec")
        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummyII.fits"),
                   apertures="35:42",
                   format="multispec")

        iraf.fit1d(place_here("norm.dummyI.fits"),
                   place_here("norm.dummyIa.fits"),
                   naverage=1,
                   axis=2,
                   type="fit",
                   low_rej=1.0,
                   high_rej=2.0,
                   order=2,
                   niterate=2,
                   func="spline3",
                   sample="*")
        iraf.fit1d(place_here("norm.dummyII.fits"),
                   place_here("norm.dummyIIa.fits"),
                   naverage=1,
                   axis=2,
                   type="fit",
                   low_rej=1.0,
                   high_rej=2.0,
                   order=2,
                   niterate=2,
                   func="spline3",
                   sample="*")

        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummy1.fits"),
                   apertures="1",
                   format="multispec",
                   w1="4544")
        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummy2.fits"),
                   apertures="2",
                   format="multispec",
                   w1="4569")
        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummy3.fits"),
                   apertures="3:7",
                   format="multispec")
        iraf.scopy(place_here("norm.dummyIa.fits"),
                   place_here("norm.dummy4.fits"),
                   apertures="8:10",
                   format="multispec")
        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummy5.fits"),
                   apertures="11:37",
                   format="multispec")
        iraf.scopy(place_here("norm.dummyIIa.fits"),
                   place_here("norm.dummy6.fits"),
                   apertures="38:40",
                   format="multispec")
        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummy7.fits"),
                   apertures="41:50",
                   format="multispec")
        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummy8.fits"),
                   apertures="51",
                   format="multispec",
                   w2="7591")

        iraf.scombine("@normalization.list.lis",
                      place_here("norm.dummyIII.fits"),
                      group='apertures')

        iraf.fit1d(place_here("norm.dummyIII.fits"),
                   place_here("norm.dummyIV.fits"),
                   naverage=1,
                   axis=1,
                   type="fit",
                   low_rej=0.8,
                   high_rej=2.0,
                   order=7,
                   niterate=4,
                   func="spline3",
                   sample="*")

        iraf.sarith(reduced_science_files[k], "/",
                    place_here("norm.dummyIV.fits"),
                    place_here("norm.dummyV.fits"))

        iraf.fit1d(place_here("norm.dummyV.fits"),
                   merge,
                   naverage=1,
                   axis=1,
                   type="ratio",
                   low_rej=0.2,
                   high_rej=2.0,
                   order=1,
                   niterate=4,
                   func="chebyshev",
                   sample="*")

        iraf.hedit(merge, "INSTRUME", 'TLS-echelle')

        for i in range(len(ranges)):
            apertures = int(ranges[i][0])

            new_name = merge.replace(".fits", "." + str(apertures) + ".fits")

            input1 = os.path.join(current_directory, merge)
            output = os.path.join(current_directory, new_name)

            iraf.scopy(input1,
                       output,
                       w1=ranges[i][1],
                       w2=ranges[i][2],
                       apertur=apertures,
                       format="multispec")

            aperturlist.write(new_name + "\n")

        aperturlist.close()

        new_name_merged = reduced_science_files[k].replace(
            ".extracted.fits", ".merged.fits")

        iraf.scombine("@test.norm.apertures.lis", new_name_merged, group='all')

        cut_for_ston = new_name_merged.replace("merged", "cut")
        iraf.scopy(new_name_merged,
                   cut_for_ston,
                   w1=5603,
                   w2=5612,
                   format="multispec",
                   apertures="")
        stddev = iraf.imstat(cut_for_ston,
                             Stdout=1,
                             fields="stddev",
                             format="no")
        ston = 1 / float(stddev[0])

        iraf.hedit(new_name, "STON", ston)

        for i in range(len(ranges)):
            apertures = int(ranges[i][0])
            os.remove(
                os.path.join(
                    current_directory,
                    merge.replace(".fits", "." + str(apertures) + ".fits")))

        os.remove(os.path.join(current_directory, "test.norm.apertures.lis"))

        # Clean up the intermediate files again.
        for dummy in dummies:
            remove_file(current_directory, dummy)
    print("Success! Produced files *merged.fits, *norm.fits")
Example #5
def lacos(_input0, output='clean.fits', outmask='mask.fits', gain=1.3, readn=9,
          xorder=9, yorder=9, sigclip=4.5, sigfrac=0.5, objlim=1,
          verbose=True, interactive=False):
    # print "LOGX:: Entering `lacos` method/function in %(__file__)s" %
    # globals()
    import ntt
    from ntt.util import delete
    from pyraf import iraf
    import numpy as np

    oldoutput, galaxy, skymod, med5 = 'oldoutput.fits', 'galaxy.fits', 'skymod.fits', 'med5.fits'
    blk, lapla, med3, med7, sub5, sigima, finalsel = 'blk.fits', 'lapla.fits', 'med3.fits', 'med7.fits', 'sub5.fits', 'sigima.fits', 'finalsel.fits'
    deriv2, noise, sigmap, firstsel, starreject = 'deriv2.fits', 'noise.fits', 'sigmap.fits', 'firstsel.fits', 'starreject.fits'
    inputmask = 'inputmask.fits'
    # set some parameters in standard IRAF tasks
    iraf.convolve.bilinear = 'no'
    iraf.convolve.radsym = 'no'
    # create Laplacian kernel
    # laplkernel = np.array([[0.0, -1.0, 0.0], [-1.0, 4.0, -1.0], [0.0, -1.0, 0.0]])
    f = open('_kernel', 'w')
    f.write('0 -1 0;\n-1 4 -1;\n0 -1 0')
    f.close()
    # create growth kernel
    f = open('_gkernel', 'w')
    f.write('1 1 1;\n1 1 1;\n1 1 1')
    f.close()
    gkernel = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
    delete(galaxy)
    delete(skymod)
    delete(oldoutput)

    if not output:
        output = _input0
    else:
        delete(output)
        iraf.imcopy(_input0, output, verbose='no')

    delete('_xxx.fits,_yyy.fits')
    iraf.imcopy(_input0 + '[350:550,*]', '_xxx.fits', verbose='no')
    _input = '_xxx.fits'

    arrayinput, headerinput = ntt.cosmics.fromfits(_input, verbose=False)
    # Start with an empty (all-zero) cosmic-ray mask.
    ntt.cosmics.tofits(outmask, np.zeros_like(arrayinput, dtype=np.float32),
                       headerinput, verbose=False)

    # subtract object spectra if desired
    iraf.fit1d(_input, galaxy, "fit", axis=2, order=9, func="leg", low=4.,
               high=4., nav=1, inter='no', sample="*", niter=3, grow=0, cursor="")
    iraf.imarith(_input, "-", galaxy, oldoutput)
    # Subtract sky lines
    iraf.fit1d(oldoutput, skymod, "fit", axis=1, order=5, func="leg", low=4., high=4.,
               inter='no', sample="*", nav=1, niter=3, grow=0, cursor="")
    iraf.imarith(oldoutput, "-", skymod, oldoutput)

    arrayoldoutput, headeroldoutput = ntt.cosmics.fromfits(
        oldoutput, verbose=False)
    # add object spectra to sky model
    iraf.imarith(skymod, "+", galaxy, skymod)
    delete(med5)
    # add median of residuals to sky model
    iraf.median(oldoutput, med5, 5, 5, zlor='INDEF',
                zhir='INDEF', verbose='no')
#    m5 = ndimage.filters.median_filter(_inputarray, size=5, mode='mirror')
    iraf.imarith(skymod, "+", med5, med5)
    # take second-order derivative (Laplacian) of input image
    # kernel is convolved with subsampled image, in order to remove negative
    # pattern around high pixels
    delete(blk)
    delete(lapla)
    iraf.blkrep(oldoutput, blk, 2, 2)
    iraf.convolve(blk, lapla, '_kernel')
    iraf.imreplace(lapla, 0, upper=0, lower='INDEF')
    delete(deriv2)
    delete(noise)
    iraf.blkavg(lapla, deriv2, 2, 2, option="average")
    # create noise model
    iraf.imutil.imexpr(expr='sqrt(a*' + str(gain) + '+' + str(readn) +
                       '**2)/' + str(gain), a=med5, output=noise, verbose='no')
    iraf.imreplace(med5, 0.00001, upper=0, lower='INDEF')
    # divide Laplacian by noise model
    delete(sigmap)
    iraf.imutil.imexpr(expr='(a/b)/2', a=deriv2, b=noise,
                       output=sigmap, verbose='no')
    # removal of large structure (bright, extended objects)
    delete(med5)
    iraf.median(sigmap, med5, 5, 5, zlo='INDEF', zhi='INDEF', verbose='no')
    iraf.imarith(sigmap, "-", med5, sigmap)
    # find all candidate cosmic rays
    # this selection includes sharp features such as stars and HII regions

    arraysigmap, headersigmap = ntt.cosmics.fromfits(sigmap, verbose=False)
    arrayf = np.where(arraysigmap < sigclip, 0, arraysigmap)
    arrayf = np.where(arrayf > 0.1, 1, arrayf)
    ntt.cosmics.tofits(firstsel, np.float32(
        arrayf), headersigmap, verbose=False)

    # compare candidate CRs to median filtered image
    # this step rejects bright, compact sources from the initial CR list
    # subtract background and smooth component of objects
    delete(med3)
    iraf.median(oldoutput, med3, 3, 3, zlo='INDEF', zhi='INDEF', verbose='no')
    delete(med7)
    delete('_' + med3)
    iraf.median(med3, med7, 7, 7, zlo='INDEF', zhi='INDEF', verbose='no')
    iraf.imutil.imexpr(expr='(a-b)/c', a=med3, b=med7,
                       c=noise, output='_' + med3, verbose='no')
    iraf.imreplace('_' + med3, 0.01, upper=0.01, lower='INDEF')
    # compare CR flux to object flux
    delete(starreject)
    iraf.imutil.imexpr(expr='a+b+c', a=firstsel, b=sigmap,
                       c="_" + med3, output=starreject, verbose='no')
    # discard if CR flux <= objlim * object flux
    iraf.imreplace(starreject, 0, upper=objlim, lower='INDEF')
    iraf.imreplace(starreject, 1, lower=0.5, upper='INDEF')
    iraf.imarith(firstsel, "*", starreject, firstsel)

    # grow CRs by one pixel and check in original sigma map
    arrayfirst, headerfirst = ntt.cosmics.fromfits(firstsel, verbose=False)
    arraygfirst = ntt.cosmics.my_convolve_with_FFT2(arrayfirst, gkernel)

    arraygfirst = np.where(arraygfirst > 0.5, 1, arraygfirst)
    arraygfirst = arraygfirst * arraysigmap
    arraygfirst = np.where(arraygfirst < sigclip, 0, arraygfirst)
    arraygfirst = np.where(arraygfirst > 0.1, 1, arraygfirst)

    # grow CRs by one pixel and lower detection limit
    sigcliplow = sigfrac * sigclip
    # Finding neighbouring pixels affected by cosmic rays
    arrayfinal = ntt.cosmics.my_convolve_with_FFT2(arraygfirst, gkernel)
    arrayfinal = np.where(arrayfinal > 0.5, 1, arrayfinal)
    arrayfinal = arrayfinal * arraysigmap
    arrayfinal = np.where(arrayfinal < sigcliplow, 0, arrayfinal)
    arrayfinal = np.where(arrayfinal > 0.1, 1, arrayfinal)

    # determine number of CRs found in this iteration
    arraygfirst = (1 - (arrayfinal - arrayfinal)) * arrayfinal
    npix = int(np.size(np.where(arraygfirst > 0.5)) / 2.)
    # create cleaned output image; use 3x3 median with CRs excluded
    arrayoutmask = np.where(arrayfinal > 1, 1, arrayfinal)
    ntt.cosmics.tofits(outmask, np.float32(
        arrayoutmask), headerfirst, verbose=False)
    delete(inputmask)
    arrayinputmask = (1 - (10000 * arrayoutmask)) * arrayoldoutput
    ntt.cosmics.tofits(inputmask, np.float32(
        arrayinputmask), headerfirst, verbose=False)
    delete(med5)
    iraf.median(inputmask, med5, 5, 5, zloreject=-9999, zhireject='INDEF',
                verbose='no')
    iraf.imarith(outmask, "*", med5, med5)
    delete('_yyy.fits')
    iraf.imutil.imexpr(expr='(1-a)*b+c', a=outmask, b=oldoutput,
                       c=med5, output='_yyy.fits', verbose='no')
    # add sky and object spectra back in
    iraf.imarith('_yyy.fits', "+", skymod, '_yyy.fits')
    # cleanup and get ready for next iteration
    # If no cosmic rays were found, there is nothing more to iterate on.
    if npix == 0:
        stop = True
    # delete temp files
    iraf.imcopy('_yyy.fits', output + '[350:550,*]', verbose='no')
    delete(blk + "," + lapla + "," + deriv2 + "," + med5)
    delete(med3 + "," + med7 + "," + noise + "," + sigmap)
    delete(firstsel + "," + starreject)
    delete(finalsel + "," + inputmask)
    delete(oldoutput + "," + skymod + "," + galaxy)
    delete("_" + med3 + ",_" + sigmap)
    delete('_kernel' + "," + '_gkernel')
    delete(outmask)
    delete('_xxx.fits,_yyy.fits')
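
A hedged example of invoking this cleaner on a single frame: the file names and detector parameters below are placeholders, and a 2D spectral frame is assumed because the routine hard-codes the [350:550,*] window.

# Hypothetical call; assumes the ntt package and PyRAF are importable.
lacos('spectrum_2d.fits', output='spectrum_2d_clean.fits', outmask='crmask.fits',
      gain=1.3, readn=9, sigclip=4.5, sigfrac=0.5, objlim=1)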