Code Example #1
 def makenoiselessimage(self, band, galfile, magz, psffile, save=0, gain=1.0):
    """
    Creates a noiseless convolved image
    """
    assert os.path.exists(psffile), "PSF image %s does not exist." % psffile
    broot = self.root + '_%s' % band
    outfile = broot + '_sim.fits'
    outfile_nonoise = broot + '_sim_noiseless.fits'
    # print outfile,xmax,ymax,galfile,magz,gain
    iraf.unlearn('artdata')
    iraf.unlearn('mkobjects')
    iraf.artdata.dynrange=1.e5
    print "Running iraf.mkobject for %s..." % band
    iraf.mkobjects(outfile_nonoise, output="", title="", ncols=self.xmax, 
       nlines=self.ymax, header="", background=0.0, objects=galfile,
       xoffset=0., yoffset=0., star="gaussian", radius=0.1,
       beta=2.5, ar=1., pa=0., distance=1., exptime=1., 
       magzero=magz, gain=gain, rdnoise=0., poisson=0,
       seed=2, comments=1)
    print "Convolving with PSF..."
    if _hconvolve:
       imhconvolve(outfile_nonoise, psffile, outfile, overwrite=True)
    else:
       print "No hconvolve..."
       outimage = pyfits.getdata(outfile_nonoise)
       psfimage = pyfits.getdata(psffile)
       outimage2 = fftconvolve(outimage, psfimage, mode='same')
       h = pyfits.open(outfile, mode='update')
       h[0].data = outimage2
       h.flush()
       h.close()
    self.noiselessimages[band] = outfile
    os.remove(outfile_nonoise)
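
A minimal sketch of the no-hconvolve fallback above, written with astropy.io.fits and scipy (names and files here are placeholders, not part of the original class; note that the original opens outfile in 'update' mode, which assumes that file already exists on disk):

from astropy.io import fits
from scipy.signal import fftconvolve

def convolve_with_psf(noiseless_file, psf_file, out_file):
    # convolve the noiseless model image with the PSF and write the result
    image = fits.getdata(noiseless_file)
    psf = fits.getdata(psf_file)
    convolved = fftconvolve(image, psf, mode='same')   # output keeps the image shape
    fits.writeto(out_file, convolved, overwrite=True)  # creates out_file if it is missing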
Code Example #2
    def _make_const_image(self, value):
        """
        Creates a constant image
        """
        # unlearn some iraf tasks
        iraf.unlearn('mkpattern')

        # get a random filename
        tmpfile1 = get_random_filename('t', '.fits')

        iraf.mkpattern(input=tmpfile1,
                       output="",
                       pattern="constant",
                       option="replace",
                       v1=value,
                       v2=0.0,
                       size=1,
                       title="",
                       pixtype="real",
                       ndim=2,
                       ncols=self.dimension[1],
                       nlines=self.dimension[0],
                       n3=1,
                       n4=1,
                       n5=1,
                       n6=1,
                       n7=1,
                       header="")
        return tmpfile1
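
For reference, a rough numpy/astropy equivalent of the constant image that mkpattern builds here (a sketch only; the temporary-filename helper and the exact header written by mkpattern are not reproduced):

import numpy as np
from astropy.io import fits

def make_const_image_numpy(value, nlines, ncols, filename):
    # matches pattern="constant", pixtype="real", ndim=2 in the call above
    data = np.full((nlines, ncols), float(value), dtype=np.float32)
    fits.writeto(filename, data, overwrite=True)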
Code Example #3
File: full_calibrate.py Project: bjanesh/odi-tools
def trim_img(img,x1,x2,y1,y2):
    """
    Trim a stacked image based on the coordinates given. The image is trimmed
    using ``imcopy`` through pyraf, so the x and y pixel ranges should be given
    in the correct ``imcopy`` format. ``[x1:x2,y1:y2]``

    Parameters
    ----------
    img : str
        String containing name of the image currently in use
    x1 : int
        Pixel coordinate of x1
    x2 : int
        Pixel coordinate of x2
    y1 : int
        Pixel coordinate of y1
    y2 : int
        Pixel coordinate of y2

    Returns
    -------
    img : str
        The new image is given the extension ``.trim.fits``.

    """
    x1,x2 = x1,x2
    y1,y2 = y1,y2
    input = img.nofits()+'['+repr(x1)+':'+repr(x2)+','+repr(y1)+':'+repr(y2)+']'
    output =  img.nofits()+'.trim.fits'
    if not os.path.isfile(output):
        print 'Trimming image: ' ,img
        iraf.unlearn(iraf.imcopy)
        iraf.imcopy(input = input,output = output,verbose='no',mode='h')
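
For clarity, the section string handed to imcopy has the form shown below; this helper is only illustrative and is not part of odi-tools (the example values are arbitrary):

def imcopy_section(root, x1, x2, y1, y2):
    # builds the same '[x1:x2,y1:y2]' section string that trim_img passes to imcopy
    return '%s[%d:%d,%d:%d]' % (root, x1, x2, y1, y2)

imcopy_section('stack', 2508, 15798, 2216, 15506)  # -> 'stack[2508:15798,2216:15506]'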
Code Example #4
File: reduceINT.py Project: eddienko/SamPy
def mkbiasINTWFC(filelist, type='median'):
    """
    Creates a master bias from given list of bias frames.
    Saves each extension to a different FITS file.

    Reads readnoise and gain from the FITS header, as each WFC chip has
    different values.

    :note: This function has been written specifically for INT WFC.

    :param filelist:
    :type filelist:
    :param type: combining parameter (default = median)
    :type type: string

    :return: None
    """
    input1 = ''
    for line in filelist:
        input1 += line[0] + '[1],'
    input2 = input1.replace('[1]', '[2]')
    input3 = input1.replace('[1]', '[3]')
    input4 = input1.replace('[1]', '[4]')
    inputs = [input1, input2, input3, input4]

    #note there are four SCI extensions
    for ext, input in enumerate(inputs):
        iraf.unlearn('zerocombine')
        iraf.zerocombine(input=input[:-1],
                         output='BIAS%i.fits' % (ext+1),
                         combine=type,
                         rdnoise='READNOIS',
                         gain='GAIN')
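
The comma-joined input strings passed to zerocombine are built by the loop above; a small self-contained illustration of the same idiom (file names are invented):

filelist = [['r123456.fit'], ['r123457.fit'], ['r123458.fit']]  # hypothetical rows
input1 = ''.join(row[0] + '[1],' for row in filelist)
# input1[:-1] -> 'r123456.fit[1],r123457.fit[1],r123458.fit[1]'
# replacing '[1]' with '[2]'..'[4]' gives the lists for the other three WFC extensions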
Code Example #5
File: gmosaiceti.py Project: mmorage/DRAGONS
    def execute(self):
        """Execute pyraf task: gmosaic"""
        log.debug("GmosaicETI.execute()")

        # Populate object lists
        xcldict = copy(self.clparam_dict)
        for fil in self.file_objs:
            xcldict.update(fil.get_parameter())
        for par in self.param_objs:
            xcldict.update(par.get_parameter())
        iraf.unlearn(iraf.gmos.gmosaic)

        # Use setParam to list the parameters in the logfile
        for par in xcldict:
            #Stderr and Stdout are not recognized by setParam
            if par != "Stderr" and par != "Stdout":
                gemini.gmos.gmosaic.setParam(par, xcldict[par])
        log.fullinfo("\nGMOSAIC PARAMETERS:\n")
        iraf.lpar(iraf.gmos.gmosaic, Stderr=xcldict["Stderr"], \
            Stdout=xcldict["Stdout"])

        # Execute the task using the same dict as setParam
        # (but this time with Stderr and Stdout)
        #from pprint import pprint
        #pprint(xcldict)
        try:
            gemini.gmos.gmosaic(**xcldict)
        except:
            # catch hard crash
            raise RuntimeError("The IRAF task gmos.gmosaic failed")
        if gemini.gmos.gmosaic.status:
            # catch graceful exit upon error
            raise RuntimeError("The IRAF task gmos.gmosaic failed")
        else:
            log.fullinfo("The IRAF task gmos.gmosaic completed successfully")
Code Example #6
    def execute(self):
        """Execute pyraf task: gsappwave"""
        log.debug("GsappwaveETI.execute()")

        # Populate object lists
        xcldict = copy(self.clparam_dict)
        for fil in self.file_objs:
            xcldict.update(fil.get_parameter())
        for par in self.param_objs:
            xcldict.update(par.get_parameter())
        iraf.unlearn(iraf.gmos.gsappwave)

        # Use setParam to list the parameters in the logfile
        for par in xcldict:
            # Stderr and Stdout are not recognized by setParam
            if par != "Stderr" and par != "Stdout":
                gemini.gmos.gsappwave.setParam(par, xcldict[par])
        log.fullinfo("\nGSAPPWAVE PARAMETERS:\n")
        iraf.lpar(iraf.gmos.gsappwave, Stderr=xcldict["Stderr"], Stdout=xcldict["Stdout"])

        # Execute the task using the same dict as setParam
        # (but this time with Stderr and Stdout)
        # from pprint import pprint
        # pprint(xcldict)
        gemini.gmos.gsappwave(**xcldict)
        if gemini.gmos.gsappwave.status:
            raise Errors.OutputError("The IRAF task gmos.gsappwave failed")
        else:
            log.fullinfo("The IRAF task gmos.gsappwave completed successfully")
Code Example #7
File: mosaic_combine.py Project: tddesjardins/ediscs
def bootstrapZP(image,cat='zp.cat',refcat='ref.cat',refzp=1.0):

    (ra,dec,mag,star)=np.loadtxt(cat,usecols=(0,1,2,3),unpack=True,
                                     dtype=np.dtype([('ra','<f10'),('dec','<f10'),('mag','<f10'),
                                                     ('star','<f10')]))

    (rra,rdec,rmag,rstar)=np.loadtxt(refcat,usecols=(0,1,2,3),unpack=True,
                                            dtype=np.dtype([('rra','<f10'),('rdec','<f10'),('rmag','<f10'),
                                                            ('rstar','<f10')]))

    #Grab only stars from the reference catalog
    refgood=np.where((rstar >= 0.98) & (rmag != 99.0) & (rmag > 17.0) & (rmag < 22.5))
    refcat=SkyCoord(ra=rra[refgood]*u.degree,dec=rdec[refgood]*u.degree)

    #Sort through and remove anything that is not a star and is not isolated
    #from other sources in the input catalog
    catgood=np.where((star >= 0.98) & (mag != 99.0))
    cat=SkyCoord(ra=ra[catgood]*u.degree,dec=dec[catgood]*u.degree)
    idx,d2d,_=cat.match_to_catalog_sky(refcat)
    _,d2d2,_=cat.match_to_catalog_sky(cat,2)
    final=np.where((d2d.arcsec < 0.5) & (d2d2.arcsec >= 5.0))
    
    diff=rmag[refgood][idx][final]-mag[catgood][final]
    imgZP=np.mean(diff)

    print '\n\tUsing '+str(len(diff))+' stars to calculate ZP...'
    print '\tMean ZP: '+str(round(imgZP,3))+' mag\n'

    scaleFactor=10.0**(0.4*(refzp-imgZP))

    iraf.unlearn('imcalc')
    iraf.imcalc(image,image[:-5]+'_scaled.fits','im1*'+str(scaleFactor))
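
The scale factor handed to imcalc converts the measured zero-point offset into a flux ratio; a quick numeric check with made-up values:

refzp, imgZP = 1.0, 0.7                       # hypothetical zero points
scaleFactor = 10.0 ** (0.4 * (refzp - imgZP))
# a 0.3 mag offset scales the pixel values by a factor of ~1.318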
Code Example #8
    def _compute_err_ext(self, sci_ext):
        """
        Compute the error image for a science image

        For an image in [e], the module computes the associated
        error image assuming a simple noise model with photon shot noise
        and readout noise.

        @param sci_ext: the input image
        @type sci_ext: string
        """
        # unlearn some iraf tasks
        iraf.unlearn('imexpr')
        # get a random filename
        tmpfile1 = get_random_filename('t', '.fits')

        # compute the error array
        expression = "sqrt(a + b*b)"
        iraf.imexpr(expr=expression,
                    output=tmpfile1,
                    a=sci_ext,
                    b=self.rdnoise,
                    Stdout=1)

        return tmpfile1
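
A minimal numpy sketch of the same noise model, for reference outside IRAF (the clip on negative counts is an added safeguard, not something the imexpr call does):

import numpy as np
from astropy.io import fits

def compute_err_numpy(sci_file, rdnoise, out_file):
    # sigma = sqrt(counts + rdnoise**2) for an image already in electrons
    sci = fits.getdata(sci_file)
    err = np.sqrt(np.clip(sci, 0.0, None) + rdnoise ** 2)
    fits.writeto(out_file, err, overwrite=True)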
Code Example #9
def wavelength_solution(WORK_DIR, skip_exist=False):
  print '\n + Finding the wavelength solution\n'

  # calibrate wavelength
  # This step will require you to calibrate first arc by hand. The rest will be done automatically.
  # after l (automatic find other lines) change x and y order to 4
  # x - choose plots of the fit (per order, pixel, ...)

  iraf.unlearn('ecreidentify')
  iraf.unlearn('ecidentify')

  if observations[WORK_DIR]['REF_ARC']:
    try:
      print 'shutil.copy'
      shutil.copy(observations[WORK_DIR]['REF_ARC'], 'database/' + observations[WORK_DIR]['REF_ARC'].split('/')[-1])  
    except:
      pass
  
    for cal in observations[WORK_DIR]['calibs']:
      print 'iraf.ecreident'
      iraf.ecreident(images=cal+'.ec', referenc=observations[WORK_DIR]['REF_ARC'].replace('/ec', '/').split('/')[-1], refit='yes', shift=0, cradius=5, thresho=10)
      pass
    
  for cal in observations[WORK_DIR]['calibs']:
    print 'iraf.ecident'
    iraf.ecident(images=cal+'.ec', coordli='linelists$thar.dat', match=1, maxfeat=1800, ftype='emission', fwidth=4, cradius=5, thresho=2, minsep=2, functio='chebyshev', xorder=3, yorder=3, niterat=5, lowreje=3, highreje=3, autowri='yes')
Code Example #10
File: calib.py Project: abhimat/nirc2
def makedark(files, output):
    """
    Make dark image for NIRC2 data. Makes a calib/ directory
    and stores all output there. All output and temporary files
    will be created in a darks/ subdirectory. 

    files: integer list of the files. Does not require padded zeros.
    output: output file name. Include the .fits extension.
    """
    redDir = os.getcwd() + '/'  # Reduce directory.
    curDir = redDir + 'calib/'
    darkDir = util.trimdir(curDir + 'darks/')
    rawDir = util.trimdir(os.path.abspath(redDir + '../raw') + '/')

    util.mkdir(curDir)
    util.mkdir(darkDir)
    
    _out = darkDir + output
    _outlis = darkDir + 'dark.lis'
    util.rmall([_out, _outlis])

    darks = [rawDir + 'n' + str(i).zfill(4) + '.fits' for i in files]

    f_on = open(_outlis, 'w')
    f_on.write('\n'.join(darks) + '\n')
    f_on.close()
    
    ir.unlearn('imcombine')
    ir.imcombine.combine = 'median'
    ir.imcombine.reject = 'sigclip'
    ir.imcombine.nlow = 1
    ir.imcombine.nhigh = 1
    ir.imcombine('@' + _outlis, _out)
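
A hypothetical call, matching the docstring (frame numbers need no zero padding and the output name must include the .fits extension):

makedark([16, 17, 18, 19, 20], 'dark_30s.fits')
# median-combines ../raw/n0016.fits ... n0020.fits into calib/darks/dark_30s.fits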
Code Example #11
def setup_drizzle(imgsize):
    """Setup drizzle parameters for NIRC2 data.
    @param imgsize: The size (in pixels) of the final drizzle image.
    This assumes that the image will be square.
    @type imgsize: int
    """
    print 'Setting up drizzle'
    # Setup the drizzle parameters we will use
    ir.module.load('stsdas', doprint=0, hush=1)
    ir.module.load('analysis', doprint=0, hush=1)
    ir.module.load('dither', doprint=0, hush=1)
    ir.unlearn('drizzle')
    ir.drizzle.outweig = ''
    ir.drizzle.in_mask = ''
    ir.drizzle.wt_scl = 1
    ir.drizzle.outnx = imgsize
    ir.drizzle.outny = imgsize
    ir.drizzle.pixfrac = 1
    ir.drizzle.kernel = 'lanczos3'
    ir.drizzle.scale = 1
    ir.drizzle.coeffs = distCoef
    ir.drizzle.xgeoim = distXgeoim
    ir.drizzle.ygeoim = distYgeoim
    ir.drizzle.shft_un = 'input'
    ir.drizzle.shft_fr = 'output'
    ir.drizzle.align = 'center'
    ir.drizzle.expkey = 'ITIME'
    ir.drizzle.in_un = 'counts'
    ir.drizzle.out_un = 'counts'
Code Example #12
def rotate_2005_lgsao():
    # Need to rotate an image.
    ir.unlearn('rotate')
    ir.rotate.boundary = 'constant'
    ir.rotate.constant = 0
    ir.rotate.interpolant = 'spline3'
    ir.rotate.ncols = 1040
    ir.rotate.nlines = 1040
    
    ir.rotate.xin = 528
    ir.rotate.xout = 540
    ir.rotate.yin = 645
    ir.rotate.yout = 410
    ir.rotate(workdir + 'mag05jullgs_kp.fits', workdir + 'mag05jullgs_kp_rot.fits', 190)

    ir.rotate.xin = 535
    ir.rotate.xout = 540
    ir.rotate.yin = 655
    ir.rotate.yout = 410
    ir.rotate(workdir + 'mag05jullgs_h.fits', workdir + 'mag05jullgs_h_rot.fits', 190)

    ir.rotate.xin = 572
    ir.rotate.xout = 540
    ir.rotate.yin = 694
    ir.rotate.yout = 410
    ir.rotate(workdir + 'mag05jullgs_lp.fits', workdir + 'mag05jullgs_lp_rot.fits', 190)

    ir.rotate.xin = 483
    ir.rotate.xout = 483
    ir.rotate.yin = 612
    ir.rotate.yout = 612
    ir.rotate(workdir + 'mag04jul.fits', workdir + 'mag04jul_rot.fits', 1)
Code Example #13
File: odi_helpers.py Project: sjanowiecki/odi-tools
def stack_images(refimg):
    from astropy.io import fits
    from pyraf import iraf
    print refimg
    fitsref = fits.open(refimg)
    hduref = fitsref[0]
    objname = hduref.header['object']
    filter_name = hduref.header['filter']
    sky_med = odi.find_new_bg(refimg, filter_name)
    # sky_med = hduref.header['skybg']
    output = objname+'_'+filter_name+'.fits'
    output_bpm = objname+'_'+filter_name+'_bpm.pl'
    iraf.unlearn(iraf.immatch.imcombine, iraf.imutil.imarith)
    iraf.immatch.imcombine(odi.scaledpath+'*'+filter_name+'*.fits', 'temp', combine='average', reject='none', offsets='wcs', masktype='goodvalue', maskval=0, blank=-999, scale='none', zero='none', lthresh=-900, hthresh=60000)
    # iraf.imutil.imarith.setParam('operand1','temp')
    # iraf.imutil.imarith.setParam('op','+')
    # iraf.imutil.imarith.setParam('operand2',sky_med)
    # iraf.imutil.imarith.setParam('result',output)
    # iraf.imutil.imarith.setParam('verbose','yes')
    # iraf.imutil.imarith(mode='h')
    iraf.imutil.imexpr('(a != -999) ? a + b : -999',output,'temp.fits',sky_med)
    iraf.imutil.imexpr('a < 0',output_bpm, output)
    iraf.imutil.imdelete('temp', verify='no')
    iraf.unlearn(iraf.imutil.hedit)
    iraf.imutil.hedit.setParam('images',output)
    iraf.imutil.hedit.setParam('fields','BPM')
    iraf.imutil.hedit.setParam('value',output_bpm)
    iraf.imutil.hedit.setParam('add','yes')
    iraf.imutil.hedit.setParam('addonly','no')
    iraf.imutil.hedit.setParam('verify','no')
    iraf.imutil.hedit.setParam('update','yes')
    iraf.imutil.hedit(show='no', mode='h')
    # iraf.immatch.imcombine(reprojpath+'*.fits', 'test', expm='exp.pl', combine='average', reject='none', offsets='wcs', masktype='goodvalue', maskval=0, blank=-999, scale='none', zero='none', lthresh=-900, hthresh=60000)  

    return output
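
The two imexpr calls above restore the median sky level to every non-blank pixel and then flag negative pixels as bad; the same logic in plain numpy, as a sketch with invented names (not part of odi-tools):

import numpy as np

def add_sky_and_flag(stacked, sky_med, blank=-999):
    filled = np.where(stacked != blank, stacked + sky_med, blank)  # '(a != -999) ? a + b : -999'
    bpm = (filled < 0).astype(np.uint8)                            # 'a < 0'
    return filled, bpm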
Code Example #14
File: odi_helpers.py Project: sjanowiecki/odi-tools
def reproject_ota(img, ota, rad, decd):
    from pyraf import iraf
    image = odi.illcorpath+'illcor_'+ota+'.'+str(img[16:])
    imout = odi.reprojpath+'reproj_'+ota+'.'+str(img[16:])
    iraf.mscred(_doprint=0)
    iraf.clobber='no'
    iraf.unlearn(iraf.mscred.mscimage)
    iraf.mscred.mscimage.format='image'
    iraf.mscred.mscimage.pixmask='yes'
    iraf.mscred.mscimage.verbose='yes'
    iraf.mscred.mscimage.wcssour='parameters'
    iraf.mscred.mscimage.ref=''
    iraf.mscred.mscimage.ra=rad
    iraf.mscred.mscimage.dec=decd
    iraf.mscred.mscimage.scale=0.11
    iraf.mscred.mscimage.rotation=0.0
    iraf.mscred.mscimage.blank=-999
    iraf.mscred.mscimage.interpo='poly5'
    iraf.mscred.mscimage.minterp='poly5'
    iraf.mscred.mscimage.nxbl=4096
    iraf.mscred.mscimage.nybl=4096
    iraf.mscred.mscimage.fluxcon='yes'
    iraf.mscred.mscimage(image,imout)
    
    return
Code Example #15
def do_axecore(filters,
               basepath,
               back='yes',
               extrfwhm=4.0,
               drzfwhm=0.0,
               backfwhm=10.0,
               orient='no',
               slitless_geom='no',
               cont_model='gauss',
               sampling='drizzle',
               np=5,
               interp=1):
    olddir = os.getcwd()
    os.chdir(basepath)
    for filter in filters:
        if filter[0].lower() == 'g':  #grism, continue
            listfile = os.path.join(basepath, filter + '.lis')
            conffile = "WFC3.IR." + filter + ".V2.5.conf"
            iraf.unlearn('axecore')
            iraf.axecore(listfile,
                         conffile,
                         extrfwhm=extrfwhm,
                         drzfwhm=drzfwhm,
                         back=back,
                         backfwhm=backfwhm,
                         orient=orient,
                         slitless_geom=slitless_geom,
                         cont_model=cont_model,
                         sampling=sampling,
                         np=np,
                         interp=interp)
    os.chdir(olddir)
Code Example #16
def trim_img(img, x1, x2, y1, y2):
    """
    Trim a stacked image based on the coordinates given. The image is trimmed
    using ``imcopy`` through pyraf, so the x and y pixel ranges should be given
    in the correct ``imcopy`` format. ``[x1:x2,y1:y2]``

    Parameters
    ----------
    img : str
        String containing name of the image currently in use
    x1 : int
        Pixel coordinate of x1
    x2 : int
        Pixel coordinate of x2
    y1 : int
        Pixel coordinate of y1
    y2 : int
        Pixel coordinate of y2

    Returns
    -------
    img : str
        The new image is given the extension ``.trim.fits``.

    """
    x1, x2 = x1, x2
    y1, y2 = y1, y2
    input = img.nofits() + '[' + repr(x1) + ':' + repr(x2) + ',' + repr(
        y1) + ':' + repr(y2) + ']'
    output = img.nofits() + '.trim.fits'
    if not os.path.isfile(output):
        print 'Trimming image: ', img
        iraf.unlearn(iraf.imcopy)
        iraf.imcopy(input=input, output=output, verbose='no', mode='h')
Code Example #17
File: reduceINT.py Project: RainW7/SamPy
def mkflatINTWFC(data, combine='median', reject='avsigclip'):
    """
    Creates a master flat from a given list of flat frames.

    Reads readnoise and gain from the FITS header, as each WFC chip has
    different values.

    :note: ccdproc
    """
    for filter in data:
        input1 = ''
        for file in data[filter]:
            input1 += file + '[1],'

        input2 = input1.replace('[1]', '[2]')
        input3 = input1.replace('[1]', '[3]')
        input4 = input1.replace('[1]', '[4]')

        inputs = [input1, input2, input3, input4]

        for ext, input in enumerate(inputs):
            print input
            iraf.unlearn('flatcombine')
            iraf.flatcombine(input=input[:-1],
                             output='FLAT_{0:>s}_{1:d}.fits'.format(
                                 filter, ext + 1),
                             combine=combine,
                             reject=reject,
                             rdnoise='READNOIS',
                             gain='GAIN')
Code Example #18
File: reduceINT.py Project: eddienko/SamPy
def mkflatINTWFC(data, combine='median', reject='avsigclip'):
    """
    Creates a master flat from a given list of flat frames.

    Reads readnoise and gain from the FITS header, as each WFC chip has
    different values.

    :note: ccdproc
    """
    for filter in data:
        input1 = ''
        for file in data[filter]:
            input1 += file + '[1],'

        input2 = input1.replace('[1]', '[2]')
        input3 = input1.replace('[1]', '[3]')
        input4 = input1.replace('[1]', '[4]')

        inputs = [input1, input2, input3, input4]

        for ext, input in enumerate(inputs):
            print input
            iraf.unlearn('flatcombine')
            iraf.flatcombine(input=input[:-1],
                             output='FLAT_{0:>s}_{1:d}.fits'.format(filter, ext + 1),
                             combine=combine,
                             reject=reject,
                             rdnoise='READNOIS',
                             gain='GAIN')
Code Example #19
File: pipeline.py Project: SEDMachine/AstroObject
 def subtract_bias(self):
     """Subtracting Bias Frame"""
     iraf.unlearn(iraf.ccdproc)
     iraf.ccdproc(self.data.iraf.modatfile(), 
         ccdtype="", fixpix="no", overscan="no", trim ="no", zerocor="yes", darkcor="no", flatcor ="no", 
         zero=self.bias.iin("Bias"))
     self.data.idone()
Code Example #20
File: pipeline.py Project: SEDMachine/AstroObject
 def subtract_dark(self):
     """Subtracting Dark Frame"""
     iraf.unlearn(iraf.ccdproc)
     iraf.ccdproc(self.data.iraf.modatfile(), 
         ccdtype="", fixpix="no", overscan="no", trim ="no", zerocor="no", darkcor="yes", flatcor ="no", 
         dark=self.dark.iin("Dark"))
     self.data.idone()
Code Example #21
File: rusalt.py Project: Youssef15015/rusalt
def stdsensfunc(fs=None):
    iraf.cd('work')
    if fs is None:
        fs = glob('x1d/sci*x1d*c?.fits')
    if len(fs) == 0:
        print "WARNING: No extracted spectra to create sensfuncs from."
        iraf.cd('..')
        return

    if not os.path.exists('std'):
        os.mkdir('std')
    for f in fs:
        # Put the file in the std directory, but last 3 letters of sens
        outfile = 'std/' + f.split('/')[1]
        outfile = outfile.replace('x1d', 'sens').replace('sci', 'std')
        outfile = outfile.replace('.fits', '.dat')
        # if the object name is in the list of standard stars from pysalt
        if isstdstar(f):
            # We use pysalt here because standard requires a
            # dispersion correction which was already taken care of above
            # Write out an ascii file that pysalt.specsens can read
            asciispec = 'std/std.ascii.dat'
            spectoascii(f, asciispec)
            # run specsens
            stdfile = pysaltpath + '/data/standards/spectroscopic/m%s.dat' % pyfits.getval(f, 'OBJECT').lower().replace('-','_')
            extfile = pysaltpath + '/data/site/suth_extinct.dat'
            iraf.unlearn(iraf.specsens)
            iraf.specsens(asciispec, outfile, stdfile, extfile,
                          airmass=pyfits.getval(f, 'AIRMASS'),
                          exptime=pyfits.getval(f, 'EXPTIME'), function='poly',
                          order=11, clobber=True, mode='h', thresh=1e10)
            # delete the ascii file
            os.remove(asciispec)
    iraf.cd('..')
Code Example #22
File: rusaltD.py Project: Youssef15015/rusalt
def fixpix(fs=None):
    iraf.cd('work')
    if fs is None:
        fs = glob('nrm/sci*nrm*.fits')
    if len(fs) == 0:
        print "WARNING: No rectified images to fix."
        iraf.cd('..')
        return
    if not os.path.exists('fix'):
        os.mkdir('fix')
    for f in fs:
        outname = f.replace('nrm', 'fix')
        # Copy the file to the fix directory
        shutil.copy(f, outname)
        # Set all of the BPM pixels = 0
        h = pyfits.open(outname, mode='update')
        h['SCI'].data[h['BPM'].data == 1] = 0
        # Grab the CRM extension from the lax file
        laxhdu = pyfits.open(f.replace('nrm', 'lax'))
        h.append(pyfits.ImageHDU(data=laxhdu['CRM'].data.copy(),
                                 header=laxhdu['CRM'].header.copy(),
                                 name='CRM'))
        h.flush()
        h.close()
        laxhdu.close()

        # Run iraf's fixpix on the cosmic rays, not ideal,
        # but better than nothing because apall doesn't take a bad pixel mask
        iraf.unlearn(iraf.fixpix)
        iraf.flpr()
        iraf.fixpix(outname + '[SCI]', outname + '[CRM]', mode='hl')
    iraf.cd('..')
Code Example #23
File: rusalt_raw.py Project: Youssef15015/RuSalt-1
def identify2d(fs=None):
    iraf.cd('work')
    if fs is None:
        fs = glob('mos/arc*mos*.fits')
    if len(fs) == 0:
        print "WARNING: No mosaiced (2D) specidentify."
        # Change directories to fail gracefully
        iraf.cd('..')
        return
    arcfs, arcgas = get_ims(fs, 'arc')
    if not os.path.exists('id2'):
        os.mkdir('id2')

    lampfiles = {
        'Th Ar': 'ThAr.salt',
        'Xe': 'Xe.salt',
        'Ne': 'NeAr.salt',
        'Cu Ar': 'CuAr.salt',
        'Ar': 'Argon_hires.salt',
        'Hg Ar': 'HgAr.salt'
    }
    for i, f in enumerate(arcfs):
        ga = arcgas[i]

        # find lamp and corresponding linelist
        lamp = pyfits.getval(f, 'LAMPID')
        lampfn = lampfiles[lamp]
        if pyfits.getval(f, 'GRATING') == 'PG0300' and lamp == 'Ar':
            lampfn = 'Argon_lores.swj'

        ccdsum = int(pyfits.getval(f, 'CCDSUM').split()[1])

        # linelistpath is a global variable defined in beginning, path to
        # where the line lists are.
        lamplines = pysaltpath + '/data/linelists/' + lampfn
        print(lamplines)

        # img num should be right before the .fits
        imgnum = f[-9:-5]
        # run pysalt specidentify
        idfile = 'id2/arc%05.2fid2%04i' % (float(ga), int(imgnum)) + '.db'
        iraf.unlearn(iraf.specidentify)
        iraf.flpr()
        iraf.specidentify(
            images=f,
            linelist=lamplines,
            outfile=idfile,
            guesstype='rss',
            inter=True,  # automethod='FitXcor',
            rstep=600 / ccdsum,
            rstart=200 / ccdsum,
            startext=1,
            clobber='yes',
            #startext=1, clobber='yes',
            verbose='no',
            mode='hl',
            logfile='salt.log',
            mdiff=2,
            function='legendre')
    iraf.cd('..')
Code Example #24
File: rusalt.py Project: saurabhwjha/rusalt
def stdsensfunc(fs=None):
    iraf.cd('work')
    if fs is None:
        fs = glob('x1d/sci*x1d*c?.fits')
    if len(fs) == 0:
        print "WARNING: No extracted spectra to create sensfuncs from."
        iraf.cd('..')
        return

    if not os.path.exists('std'):
        os.mkdir('std')
    for f in fs:
        # Put the file in the std directory, but last 3 letters of sens
        outfile = 'std/' + f.split('/')[1]
        outfile = outfile.replace('x1d', 'sens').replace('sci', 'std')
        outfile = outfile.replace('.fits', '.dat')
        # if the object name is in the list of standard stars from pysalt
        if isstdstar(f):
            # We use pysalt here because standard requires a
            # dispersion correction which was already taken care of above
            # Write out an ascii file that pysalt.specsens can read
            asciispec = 'std/std.ascii.dat'
            spectoascii(f, asciispec)
            # run specsens
            stdfile = pysaltpath + '/data/standards/spectroscopic/m%s.dat' % pyfits.getval(f, 'OBJECT').lower().replace('-','_')
            extfile = pysaltpath + '/data/site/suth_extinct.dat'
            iraf.unlearn(iraf.specsens)
            iraf.specsens(asciispec, outfile, stdfile, extfile,
                          airmass=pyfits.getval(f, 'AIRMASS'),
                          exptime=pyfits.getval(f, 'EXPTIME'), function='poly',
                          order=11, clobber=True, mode='h', thresh=1e10)
            # delete the ascii file
            os.remove(asciispec)
    iraf.cd('..')
Code Example #25
    def execute(self):
        """Execute pyraf task: gmosaic"""
        log.debug("GmosaicETI.execute()")

        # Populate object lists
        xcldict = copy(self.clparam_dict)
        for fil in self.file_objs:
            xcldict.update(fil.get_parameter())
        for par in self.param_objs:
            xcldict.update(par.get_parameter())
        iraf.unlearn(iraf.gmos.gmosaic)

        # Use setParam to list the parameters in the logfile
        for par in xcldict:
            #Stderr and Stdout are not recognized by setParam
            if par != "Stderr" and par != "Stdout":
                gemini.gmos.gmosaic.setParam(par, xcldict[par])
        log.fullinfo("\nGMOSAIC PARAMETERS:\n")
        iraf.lpar(iraf.gmos.gmosaic, Stderr=xcldict["Stderr"], \
            Stdout=xcldict["Stdout"])

        # Execute the task using the same dict as setParam
        # (but this time with Stderr and Stdout)
        #from pprint import pprint
        #pprint(xcldict)
        try:
            gemini.gmos.gmosaic(**xcldict)
        except:
            # catch hard crash
            raise RuntimeError("The IRAF task gmos.gmosaic failed")
        if gemini.gmos.gmosaic.status:
            # catch graceful exit upon error
            raise RuntimeError("The IRAF task gmos.gmosaic failed")
        else:
            log.fullinfo("The IRAF task gmos.gmosaic completed successfully")
Code Example #26
File: odi_helpers.py Project: bjanesh/odi-tools
def tpv2tan_hdr(img, ota):
    image = odi.reprojpath+'reproj_'+ota+'.'+img.stem()
    # change the CTYPENs to be TANs if they aren't already
    tqdm.write('TPV -> TAN in {:s}'.format(image))
    iraf.imutil.hedit.setParam('images',image)
    iraf.imutil.hedit.setParam('fields','CTYPE1')
    iraf.imutil.hedit.setParam('value','RA---TAN')
    iraf.imutil.hedit.setParam('add','yes')
    iraf.imutil.hedit.setParam('addonly','no')
    iraf.imutil.hedit.setParam('verify','no')
    iraf.imutil.hedit.setParam('update','yes')
    iraf.imutil.hedit(show='no', mode='h')

    iraf.imutil.hedit.setParam('images',image)
    iraf.imutil.hedit.setParam('fields','CTYPE2')
    iraf.imutil.hedit.setParam('value','DEC--TAN')
    iraf.imutil.hedit.setParam('add','yes')
    iraf.imutil.hedit.setParam('addonly','no')
    iraf.imutil.hedit.setParam('verify','no')
    iraf.imutil.hedit.setParam('update','yes')
    iraf.imutil.hedit(show='no', mode='h')

    # delete any PV keywords
    # leaving them in will give you trouble with the img wcs
    iraf.unlearn(iraf.imutil.hedit)
    iraf.imutil.hedit.setParam('images',image)
    iraf.imutil.hedit.setParam('fields','PV*')
    iraf.imutil.hedit.setParam('delete','yes')
    iraf.imutil.hedit.setParam('verify','no')
    iraf.imutil.hedit.setParam('update','yes')
    iraf.imutil.hedit(show='no', mode='h')
Code Example #27
File: rusalt.py Project: saurabhwjha/rusalt
def fixpix(fs=None):
    iraf.cd('work')
    if fs is None:
        fs = glob('nrm/sci*nrm*.fits')
    if len(fs) == 0:
        print "WARNING: No rectified images to fix."
        iraf.cd('..')
        return
    if not os.path.exists('fix'):
        os.mkdir('fix')
    for f in fs:
        outname = f.replace('nrm', 'fix')
        # Copy the file to the fix directory
        shutil.copy(f, outname)
        # Set all of the BPM pixels = 0
        h = pyfits.open(outname, mode='update')
        h['SCI'].data[h['BPM'].data == 1] = 0
        # Grab the CRM extension from the lax file
        laxhdu = pyfits.open(f.replace('nrm', 'lax'))
        h.append(pyfits.ImageHDU(data=laxhdu['CRM'].data.copy(),
                                 header=laxhdu['CRM'].header.copy(),
                                 name='CRM'))
        h.flush()
        h.close()
        laxhdu.close()

        # Run iraf's fixpix on the cosmic rays, not ideal,
        # but better than nothing because apall doesn't take a bad pixel mask
        iraf.unlearn(iraf.fixpix)
        iraf.flpr()
        iraf.fixpix(outname + '[SCI]', outname + '[CRM]', mode='hl')
    iraf.cd('..')
Code Example #28
def make_bpms(images):
  for i in range(len(images)):
    for key in OTA_dictionary:
      mask,masked_array = mask_ota(images[i],OTA_dictionary[key])
      hdu = fits.PrimaryHDU(mask.astype(float))
      # create string for mask fits name
      mask_name = bppath+'mask_'+OTA_dictionary[key]+'.'+str(images[i][-27:49])
      BPM = mask_name.replace('fits','pl')
      if not os.path.isfile(mask_name):
        hdu.writeto(mask_name,clobber=True)
      if not os.path.isfile(mask_name.replace('fits','pl')):
        iraf.unlearn(iraf.imutil.imcopy)
        iraf.imutil.imcopy.setParam('input',mask_name)
        iraf.imutil.imcopy.setParam('output',mask_name.replace('fits','pl'))
        iraf.imutil.imcopy.setParam('verbose','no')
        iraf.imutil.imcopy(mode='h')
      iraf.unlearn(iraf.imutil.hedit)
      iraf.imutil.hedit.setParam('images',images[i]+'['+str(key)+']')
      iraf.imutil.hedit.setParam('fields','BPM')
      iraf.imutil.hedit.setParam('value',BPM)
      iraf.imutil.hedit.setParam('add','no')
      iraf.imutil.hedit.setParam('addonly','no')
      iraf.imutil.hedit.setParam('verify','no')
      iraf.imutil.hedit.setParam('update','yes')
      iraf.imutil.hedit(mode='h')
  return
Code Example #29
File: combFilter.py Project: XingxingHuang/APLUS
    def _medFilterMaskedWgtIm(self, imname, medfiltsize=5):
        """ Method to median filter an image, intended to be the
        detection weight image.  Default filter size is 5 pix.
        It first makes a mask from the input image so that no input
        zero pixels (which are specially flagged as having no weight)
        become nonzero after filtering.
        """
        tmpMed  = '_tmpMED.fits'
        tmpMask = '_tmpMask.fits'
        iraf.flpr()
        iraf.flpr(iraf.median)
        iraf.flpr(iraf.imcalc)

        iraf.unlearn(iraf.median)
        iraf.median.input   = imname
        iraf.median.output  = tmpMed
        iraf.median.xwindow = medfiltsize
        iraf.median.ywindow = medfiltsize
        iraf.median.mode = 'h'
        iraf.median()

        iraf.unlearn(iraf.imcalc)
        iraf.imcalc(imname, tmpMask, "if im1 .eq. 0 then 0 else 1")
        _instring = tmpMed+','+tmpMask
        iraf.imcalc(_instring, tmpMed, "im1 * im2")

        os.rename(tmpMed,imname)
        os.remove(tmpMask)
        iraf.flpr(iraf.median)
        iraf.flpr(iraf.imcalc)
        iraf.flpr()

        return
Code Example #30
    def execute(self):
        """Execute pyraf task: gemcombine"""
        log.debug("GemcombineETI.execute()")

        # Populate object lists
        xcldict = copy(self.clparam_dict)
        for fil in self.file_objs:
            xcldict.update(fil.get_parameter())
        for par in self.param_objs:
            xcldict.update(par.get_parameter())
        iraf.unlearn(iraf.gemcombine)

        # Use setParam to list the parameters in the logfile 
        for par in xcldict:
            #Stderr and Stdout are not recognized by setParam
            if par != "Stderr" and par !="Stdout":
                gemini.gemcombine.setParam(par,xcldict[par])
        log.fullinfo("\nGEMCOMBINE PARAMETERS:\n")
        iraf.lpar(iraf.gemcombine, Stderr=xcldict["Stderr"], \
            Stdout=xcldict["Stdout"])

        # Execute the task using the same dict as setParam
        # (but this time with Stderr and Stdout)
        try:
            gemini.gemcombine(**xcldict)
        except:
            # catch hard crash of the primitive
            raise Errors.OutputError("The IRAF task gemcombine failed")
        if gemini.gemcombine.status:
            # catch graceful exit on error
            raise Errors.OutputError("The IRAF task gemcombine failed")
        else:
            log.fullinfo("The IRAF task gemcombine completed successfully")
Code Example #31
def rotate_2005_lgsao():
    # Need to rotate an image.
    ir.unlearn('rotate')
    ir.rotate.boundary = 'constant'
    ir.rotate.constant = 0
    ir.rotate.interpolant = 'spline3'
    ir.rotate.ncols = 1040
    ir.rotate.nlines = 1040

    ir.rotate.xin = 528
    ir.rotate.xout = 540
    ir.rotate.yin = 645
    ir.rotate.yout = 410
    ir.rotate(workdir + 'mag05jullgs_kp.fits',
              workdir + 'mag05jullgs_kp_rot.fits', 190)

    ir.rotate.xin = 535
    ir.rotate.xout = 540
    ir.rotate.yin = 655
    ir.rotate.yout = 410
    ir.rotate(workdir + 'mag05jullgs_h.fits',
              workdir + 'mag05jullgs_h_rot.fits', 190)

    ir.rotate.xin = 572
    ir.rotate.xout = 540
    ir.rotate.yin = 694
    ir.rotate.yout = 410
    ir.rotate(workdir + 'mag05jullgs_lp.fits',
              workdir + 'mag05jullgs_lp_rot.fits', 190)

    ir.rotate.xin = 483
    ir.rotate.xout = 483
    ir.rotate.yin = 612
    ir.rotate.yout = 612
    ir.rotate(workdir + 'mag04jul.fits', workdir + 'mag04jul_rot.fits', 1)
Code Example #32
def makedark(files, output):
    """
    Make dark image for NIRC2 data. Makes a calib/ directory
    and stores all output there. All output and temporary files
    will be created in a darks/ subdirectory. 

    files: integer list of the files. Does not require padded zeros.
    output: output file name. Include the .fits extension.
    """
    redDir = os.getcwd() + '/'  # Reduce directory.
    curDir = redDir + 'calib/'
    darkDir = util.trimdir(curDir + 'darks/')
    rawDir = util.trimdir(os.path.abspath(redDir + '../raw') + '/')

    util.mkdir(curDir)
    util.mkdir(darkDir)

    _out = darkDir + output
    _outlis = darkDir + 'dark.lis'
    util.rmall([_out, _outlis])

    darks = [rawDir + 'n' + str(i).zfill(4) + '.fits' for i in files]

    f_on = open(_outlis, 'w')
    f_on.write('\n'.join(darks) + '\n')
    f_on.close()

    ir.unlearn('imcombine')
    ir.imcombine.combine = 'median'
    ir.imcombine.reject = 'sigclip'
    ir.imcombine.nlow = 1
    ir.imcombine.nhigh = 1
    ir.imcombine('@' + _outlis, _out)
Code Example #33
File: bpm_tools.py Project: sjanowiecki/odi-tools
def make_bpms(img, ota):
    # for i in range(len(images)):
    #   for key in OTA_dictionary:
    # create string for mask fits name
    mask_name = odi.bppath+'mask_'+ota+'.'+str(img[16:])
    BPM = mask_name.replace('fits','pl')
    if not os.path.isfile(BPM):
        mask,gaps = odi.mask_ota(img,ota)
        hdu = odi.fits.PrimaryHDU(mask.astype(float))
        if not os.path.isfile(mask_name):
            hdu.writeto(mask_name,clobber=True)
        #if not os.path.isfile(mask_name.replace('fits','pl')):
            iraf.unlearn(iraf.imutil.imcopy)
            iraf.imutil.imcopy.setParam('input',mask_name)
            iraf.imutil.imcopy.setParam('output',mask_name.replace('fits','pl'))
            iraf.imutil.imcopy.setParam('verbose','no')
            iraf.imutil.imcopy(mode='h')
    iraf.unlearn(iraf.imutil.hedit)
    iraf.imutil.hedit.setParam('images',img+'['+ota+']')
    iraf.imutil.hedit.setParam('fields','BPM')
    iraf.imutil.hedit.setParam('value',BPM)
    iraf.imutil.hedit.setParam('add','yes')
    iraf.imutil.hedit.setParam('addonly','no')
    iraf.imutil.hedit.setParam('verify','no')
    iraf.imutil.hedit.setParam('update','yes')
    iraf.imutil.hedit(show='no', mode='h')
    if os.path.isfile(mask_name):
        os.remove(mask_name)
    return
Code Example #34
File: odi_illcor.py Project: oboberg/odi-tools
def make_bpms(img, ota):
    # for i in range(len(images)):
    #   for key in OTA_dictionary:
    # create string for mask fits name
    mask_name = odi.bppath + 'mask_' + ota + '.' + img.stem()
    BPM = mask_name.replace('fits', 'pl')
    if not os.path.isfile(BPM):
        mask, gaps = odi.mask_ota(img, ota)
        hdu = odi.fits.PrimaryHDU(mask.astype(float))
        if not os.path.isfile(mask_name):
            hdu.writeto(mask_name, clobber=True)
            #if not os.path.isfile(mask_name.replace('fits','pl')):
            iraf.unlearn(iraf.imutil.imcopy)
            iraf.imutil.imcopy.setParam('input', mask_name)
            iraf.imutil.imcopy.setParam('output',
                                        mask_name.replace('fits', 'pl'))
            iraf.imutil.imcopy.setParam('verbose', 'no')
            iraf.imutil.imcopy(mode='h')
    iraf.unlearn(iraf.imutil.hedit)
    iraf.imutil.hedit.setParam('images', img.f + '[' + ota + ']')
    iraf.imutil.hedit.setParam('fields', 'BPM')
    iraf.imutil.hedit.setParam('value', BPM)
    iraf.imutil.hedit.setParam('add', 'yes')
    iraf.imutil.hedit.setParam('addonly', 'no')
    iraf.imutil.hedit.setParam('verify', 'no')
    iraf.imutil.hedit.setParam('update', 'yes')
    iraf.imutil.hedit(show='no', mode='h')
    if os.path.isfile(mask_name):
        os.remove(mask_name)
    return
Code Example #35
File: reduceINT.py Project: RainW7/SamPy
def mkbiasINTWFC(filelist, type='median'):
    """
    Creates a master bias from given list of bias frames.
    Saves each extension to a different FITS file.

    Reads readnoise and gain from the FITS header, as each WFC chip has
    different values.

    :note: This function has been written specifically for INT WFC.

    :param filelist:
    :type filelist:
    :param type: combining parameter (default = median)
    :type type: string

    :return: None
    """
    input1 = ''
    for line in filelist:
        input1 += line[0] + '[1],'
    input2 = input1.replace('[1]', '[2]')
    input3 = input1.replace('[1]', '[3]')
    input4 = input1.replace('[1]', '[4]')
    inputs = [input1, input2, input3, input4]

    #note there are four SCI extensions
    for ext, input in enumerate(inputs):
        iraf.unlearn('zerocombine')
        iraf.zerocombine(input=input[:-1],
                         output='BIAS%i.fits' % (ext + 1),
                         combine=type,
                         rdnoise='READNOIS',
                         gain='GAIN')
Code Example #36
File: calib.py Project: AtomyChan/JLU-python-code
def makedark(files, output):
    """
    Make dark image for NIRC2 data. Makes a calib/ directory
    and stores all output there. All output and temporary files
    will be created in a darks/ subdirectory. 

    files: integer list of the files. Does not require padded zeros.
    output: output file name. Include the .fits extension.
    """
    redDir = os.getcwd() + "/"  # Reduce directory.
    curDir = redDir + "calib/"
    darkDir = util.trimdir(curDir + "darks/")
    rawDir = util.trimdir(os.path.abspath(redDir + "../raw") + "/")

    util.mkdir(curDir)
    util.mkdir(darkDir)

    _out = darkDir + output
    _outlis = darkDir + "dark.lis"
    util.rmall([_out, _outlis])

    darks = [rawDir + "n" + str(i).zfill(4) + ".fits" for i in files]

    f_on = open(_outlis, "w")
    f_on.write("\n".join(darks) + "\n")
    f_on.close()

    ir.unlearn("imcombine")
    ir.imcombine.combine = "median"
    ir.imcombine.reject = "sigclip"
    ir.imcombine.nlow = 1
    ir.imcombine.nhigh = 1
    ir.imcombine("@" + _outlis, _out)
Code Example #37
def setup_drizzle(imgsize):
    """Setup drizzle parameters for NIRC2 data.
    @param imgsize: The size (in pixels) of the final drizzle image.
    This assumes that the image will be square.
    @type imgsize: int
    """
    print 'Setting up drizzle'
    # Setup the drizzle parameters we will use
    ir.module.load('stsdas', doprint=0, hush=1)
    ir.module.load('analysis', doprint=0, hush=1)
    ir.module.load('dither', doprint=0, hush=1)
    ir.unlearn('drizzle')
    ir.drizzle.outweig = ''
    ir.drizzle.in_mask = ''
    ir.drizzle.wt_scl = 1
    ir.drizzle.outnx = imgsize
    ir.drizzle.outny = imgsize
    ir.drizzle.pixfrac = 1
    ir.drizzle.kernel = 'lanczos3'
    ir.drizzle.scale = 1
    ir.drizzle.coeffs = distCoef
    ir.drizzle.xgeoim = distXgeoim
    ir.drizzle.ygeoim = distYgeoim
    ir.drizzle.shft_un = 'input'
    ir.drizzle.shft_fr = 'output'
    ir.drizzle.align = 'center'
    ir.drizzle.expkey = 'ITIME'
    ir.drizzle.in_un = 'counts'
    ir.drizzle.out_un = 'counts'
Code Example #38
File: reduce.py Project: svalenti/lcogtgemini
def scireduce(scifiles, rawpath):
    for f in scifiles:
        setupname = getsetupname(f)
        # gsreduce subtracts bias and mosaics detectors
        iraf.unlearn(iraf.gsreduce)
        iraf.gsreduce('@' + f, outimages=f[:-4]+'.mef', rawpath=rawpath, bias="bias",
                      fl_over=dooverscan, fl_fixpix='no', fl_flat=False,
                      fl_gmosaic=False, fl_cut=False, fl_gsappwave=False, fl_oversize=False)

        if is_GS:
            # Renormalize the chips to remove the discrete jump in the
            # sensitivity due to differences in the QE for different chips
            iraf.unlearn(iraf.gqecorr)
            iraf.gqecorr(f[:-4]+'.mef', outimages=f[:-4]+'.qe.fits', fl_keep=True, fl_correct=True,
                         refimages=setupname + '.arc.arc.fits',
                         corrimages=setupname +'.qe.fits', verbose=True)

            iraf.unlearn(iraf.gmosaic)
            iraf.gmosaic(f[:-4]+'.qe.fits', outimages=f[:-4] +'.fits')
        else:
            iraf.unlearn(iraf.gmosaic)
            iraf.gmosaic(f[:-4]+'.mef.fits', outimages=f[:-4] +'.fits')

        # Flat field the image
        hdu = pyfits.open(f[:-4]+'.fits', mode='update')
        hdu['SCI'].data /= pyfits.getdata(setupname+'.flat.fits', extname='SCI')
        hdu.flush()
        hdu.close()

        # Transform the data based on the arc  wavelength solution
        iraf.unlearn(iraf.gstransform)
        iraf.gstransform(f[:-4], wavtran=setupname + '.arc')
Code Example #39
File: rusalt_raw.py Project: Youssef15015/RuSalt-1
def fluxcal(stdsfolder='./', fs=None):
    iraf.cd('work')
    if fs is None:
        fs = glob('x1d/sci*x1d*c*.fits')
    if len(fs) == 0:
        print "WARNING: No science chip spectra to flux calibrate."
        iraf.cd('..')
        return

    if not os.path.exists('flx'):
        os.mkdir('flx')
    extfile = pysaltpath + '/data/site/suth_extinct.dat'
    stdfiles = glob(stdsfolder + '/std/*sens*c?.dat')
    print(stdfiles)
    for f in fs:
        outfile = f.replace('x1d', 'flx')
        chip = outfile[-6]
        hdu = pyfits.open(f)
        ga = f.split('/')[1][3:8]
        # Get the standard sensfunc with the same grating angle
        stdfile = None
        for stdf in stdfiles:
            if np.isclose(float(ga),
                          float(stdf.split('/')[stdf.count('/')][3:8]),
                          rtol=1e-2):
                # Get the right chip number
                if chip == stdf[-5]:
                    stdfile = stdf
                    break
        if stdfile is None:
            print('No standard star with grating-angle %s' % ga)
            continue
        # for each extracted aperture
        for i in range(hdu[0].data.shape[1]):
            # create an ascii file that pysalt can read
            asciiname = 'flx/sciflx.dat'
            outtmpname = 'flx/scical.dat'
            spectoascii(f, asciiname, i)
            # Run pysalt.speccal
            iraf.unlearn(iraf.speccal)
            iraf.flpr()
            iraf.speccal(asciiname,
                         outtmpname,
                         stdfile,
                         extfile,
                         airmass=pyfits.getval(f, 'AIRMASS'),
                         exptime=pyfits.getval(f, 'EXPTIME'),
                         clobber=True,
                         mode='h')
            # read in the flux calibrated ascii file and copy its
            # contents into a fits file
            flxcal = np.genfromtxt(outtmpname).transpose()
            hdu[0].data[0, i] = flxcal[1]
            hdu[0].data[2, i] = flxcal[2]
            # delete the ascii file
            os.remove(asciiname)
            os.remove(outtmpname)
        hdu.writeto(outfile, clobber=True)
    iraf.cd('..')
Code Example #40
File: mosaic_combine.py Project: tddesjardins/ediscs
def mkScaleImage(image):

    hdr=pf.open(image)[0].header
    (mult,add)=(hdr['MSCSCALE'],hdr['MSCZERO'])
    iraf.unlearn('imcalc')
    if os.path.exists(image[:-5]+'_scale.fits'):
        os.remove(image[:-5]+'_scale.fits')
    iraf.imcalc(image,image[:-5]+'_scale.fits','(im1+'+str(add)+')*'+str(mult))
Code Example #41
File: pipeline.py Project: SEDMachine/AstroObject
 def divide_flat(self):
     """Dividing by Flat Frame"""
     iraf.unlearn(iraf.ccdproc)
     iraf.ccdproc(self.data.iraf.inatfile(), 
         output=self.data.iraf.outatfile(append="-Flat"),
         flat=self.flat.iin("Flat"),
         ccdtype="", fixpix="no", overscan="no", trim ="no", zerocor="no", flatcor="yes", darkcor ="no")
     self.data.iraf.done()
Code Example #42
    def __init__(self):
        """
        Initializes the class
        """
        from pyraf import iraf
        from iraf import stsdas, analysis, dither

        # unlearn the task
        iraf.unlearn('drizzle')
Code Example #43
File: mosaic_combine.py Project: tddesjardins/ediscs
def mkWeightMap(image,search=''):

    if search != '':
        imname=glob.glob(search)[0]
    else:
        imname=image
        
    iraf.unlearn('imcalc')
    iraf.imcalc(imname,'weight.fits','if im1 .eq. 0 then 0.0 else 1.0')
Code Example #44
File: full_phot.py Project: sjanowiecki/odi-tools
def trim_img(img):
    x1,x2 = 2508,15798
    y1,y2 = 2216,15506
    input = img[:-5]+'['+repr(x1)+':'+repr(x2)+','+repr(y1)+':'+repr(y2)+']'
    output =  img[:-5]+'.trim.fits'
    if not os.path.isfile(output):
        print 'Trimming image: ', img
        iraf.unlearn(iraf.imcopy)
        iraf.imcopy(input = input,output = output,verbose='no',mode='h')
Code Example #45
File: __init__.py Project: griffin-h/lcogtgemini
def skysub(scifiles, rawpath):
    for f in scifiles:
        # sky subtraction
        # output has an s prefixed on the front
        # This step is currently quite slow for Gemini-South data
        iraf.unlearn(iraf.gsskysub)
        iraf.gsskysub('t' + f[:-4], long_sample='*', fl_inter='no', fl_vardq=dodq,
                      naverage=-10, order=1, low_reject=2.0, high_reject=2.0,
                      niterate=10, mode='h')
Code Example #46
File: reduce.py Project: svalenti/lcogtgemini
def skysub(scifiles, rawpath):
    for f in scifiles:
        # sky subtraction
        # output has an s prefixed on the front
        # This step is currently quite slow for Gemini-South data
        iraf.unlearn(iraf.gsskysub)
        iraf.gsskysub('t' + f[:-4], long_sample='*', fl_inter='no',
                      naverage=-10, order=1, low_reject=2.0, high_reject=2.0,
                      niterate=10, mode='h')
Code Example #47
File: imagecube.py Project: durga2112/astro2013
def convolve_images_psf(images_with_headers):
    print("Convolving images (not implemented yet)")

    for i in range(0, len(images_with_headers)):

        original_filename = os.path.basename(images_with_headers[i][2])
        original_directory = os.path.dirname(images_with_headers[i][2])
        new_directory = original_directory + "/convolved/"
        artificial_filename = new_directory + original_filename + "_pixelgrid.fits"
        registered_filename = new_directory + original_filename + "_registered.fits"
        input_directory = original_directory + "/registered/"
        input_filename = input_directory + original_filename + "_registered.fits"
        print("Artificial filename: " + artificial_filename)
        print("Registered filename: " + registered_filename)
        print("Input filename: " + input_filename)
        if not os.path.exists(new_directory):
            os.makedirs(new_directory)

        #reading the science image:
        science_image = fits.getdata(input_filename)

        # if using a kernel image, then we first regrid the kernel to the same as in the science image, and we re-center the kernel:

        # create a fake image "apixel_kernel.fits"
        # the original kernel has a grid of 3645*3645 pixels and centered at (1822, 1822)
        # ncols = nlines = initial_number_of_rows * initial_pixelsize_of_the_kernel  / science_image_pixelsize
        # in the current case: 3645* 0.25 (arcsecs per pixel) / 2 (arcsecs per pixel) = 455.62
        artdata.mkpattern(input="apixel_kernel.fits", output="apixel_kernel.fits", pattern="constant", option="replace",v1=0., v2=1., size=1, title="", pixtype="real", ndim=2, ncols=455,nlines=455,n3=1, n4=1, n5=1, n6=1, n7=1, header="")

        #Then, tag the desired WCS in this fake image:
        #
        # unlearn some iraf tasks
        iraf.unlearn('ccsetwcs')
        #xref = yref = ncols/2 = nlines/2
        #xmag, ymag = pixel scale of science image
        iraf.ccsetwcs(images="apixel_kernel.fits", database="", solution="", xref=227.5, yref=227.5, xmag=2, ymag=2, xrotati=0.,yrotati=0.,lngref=0, latref=0, lngunit="hours", latunit="degrees", transpo="no", project="tan", coosyst="j2000", update="yes", pixsyst="logical", verbose="yes")

        # Then, register the fits file of interest to the WCS of the fake fits file
        #
        # unlearn some iraf tasks
        iraf.unlearn('wregister')

        iraf.wregister(input="Kernel_HiRes_PACS_70_to_SPIRE_500.fits", reference="apixel_kernel.fits", output="Kernel_P70_2_S500.fits", fluxconserve="yes")

        # then we get the data from the kernel
        kernel_image = pyfits.getdata('Kernel_P70_2_S500.fits')

        #several ways to do the convolution, but is best to use number 3 or 4:

        #3. 
        result3 = astropy.nddata.convolution.convolve.convolve(science_image, kernel_image) # got a segmentation fault - it needs an odd number of columns/rows for the kernel
        pyfits.writeto('science_image_convolved_3.fits',result3)

        #4. 
        result4 = astropy.nddata.convolution.convolve.convolve_fft(science_image,kernel_image) # worked OK - was the fastest thus far
        pyfits.writeto('science_image_convolved_4.fits',result4) 
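
The kernel regridding size quoted in the comments above (3645 * 0.25 / 2 = 455.6, truncated to 455) generalizes as below; this helper is only illustrative and not part of the original script:

def regridded_kernel_size(n_pix, kernel_pixscale, science_pixscale):
    # ncols = nlines = N_kernel * kernel_pixel_scale / science_pixel_scale
    return int(n_pix * kernel_pixscale / science_pixscale)

regridded_kernel_size(3645, 0.25, 2.0)  # -> 455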
Code Example #48
File: shifts.py Project: themiyan/threedhst
def run_tweakshifts(asn_direct, verbose=False, clean=True):
    """
run_tweakshifts(asn_direct)
    
    asn_direct - filename of ASN table of direct images [...]_asn.fits
    
    This routine only uses dither.tweakshifts to compute the relative shifts of 
    the direct images
    """
    from pyraf import iraf
    from iraf import stsdas, dither

    no = iraf.no
    yes = iraf.yes
    INDEF = iraf.INDEF

    root = asn_direct.split('_asn.fits')[0]  #.lower()

    try:
        os.remove(root + '_tweak.fits')
    except:
        pass

    iraf.flpr()
    iraf.flpr()
    iraf.flpr()
    if clean:
        clean = iraf.yes
    else:
        clean = iraf.no

    iraf.unlearn('tweakshifts')

    status = iraf.tweakshifts(input=asn_direct, shiftfile='',
                     reference=root+'_tweak.fits',
                     output = root+'_shifts.txt', findmode = 'catalog',
                     gencatalog = 'daofind', sextractpars = '',
                     undistort = yes, computesig = yes, idckey = 'idctab', \
       clean = clean, verbose = no, catfile = '', xcol = 1, ycol = 2, \
       fluxcol = 3, fluxmax = INDEF, fluxmin = INDEF, fluxunits = 'counts', \
       nbright = INDEF, refcat = '', refxcol = 1, refycol = 2, rfluxcol = 3, \
       rfluxmax = INDEF, rfluxmin = INDEF, rfluxunits = 'counts', \
       refnbright = INDEF, minobj = 15, nmatch = 30, matching = 'tolerance', \
       xyxin = INDEF, xyyin = INDEF, tolerance = 4.0, fwhmpsf = 1.5, \
       sigma = 0.0, datamin = INDEF, datamax = INDEF, threshold = 4.0, \
       nsigma = 1.5, fitgeometry = 'shift', function = 'polynomial', \
       maxiter = 3, reject = 3.0, crossref = '', margin = 50, tapersz = 50, \
       pad = no, fwhm = 7.0, ellip = 0.05, pa = 45.0, fitbox = 7, \
    Stdout=1)

    if verbose:
        for line in status:
            print line

    return status
Code Example #49
File: fitellifunc.py Project: svn2github/pymorph
def run_elli(input, output, xcntr, ycntr, eg, pa, sma):#,radd,background):
    """The function resposible for fit ellipse"""
    iraf.stsdas(_doprint=0)
    iraf.tables(_doprint=0)
    iraf.stsdas.analysis(_doprint=0)
    iraf.stsdas.analysis.isophote(_doprint=0)
    image_exist = 1
#    if(str(input)[:3] == 'ima'):
#        output = 'elli_' + input[6:-4] + 'txt' 
#    if(str(input)[:3] == 'out'):
#        output = 'out_elli_' + str(input)[4:-7] + 'txt'
#    else:
#        output = 'elli_' + input[:-5] + '.txt'
    # unlearn geompar, controlpar, samplepar, magpar, ellipse
    iraf.unlearn('geompar')
    iraf.geompar.x0=xcntr
    iraf.geompar.y0=ycntr
    iraf.geompar.ellip0=eg
    iraf.geompar.pa0=pa
    iraf.geompar.sma0=10
    iraf.geompar.minsma=0.1
    iraf.geompar.maxsma=sma*5.0
    iraf.geompar.step=0.1
    iraf.geompar.recente="no"
    iraf.geompar.xylearn="no"
    iraf.unlearn('controlpar')
    iraf.controlpar.conver=0.05
    iraf.controlpar.minit=10
    iraf.controlpar.maxit=50
    iraf.controlpar.hcenter="no"
    iraf.controlpar.hellip="no"
    iraf.controlpar.hpa="no"
    iraf.controlpar.wander="INDEF"
    iraf.controlpar.maxgerr=0.5
    iraf.controlpar.olthres=1
    iraf.controlpar.soft="no"
    iraf.samplepar.integrm="bi-linear"
    iraf.samplepar.usclip=3
    iraf.samplepar.lsclip=3
    iraf.samplepar.nclip=0
    iraf.samplepar.fflag=0.5
    iraf.magpar.mag0=c.mag_zero  # photometric zero point from the calling module's configuration
    iraf.magpar.refer=1
    iraf.magpar.zerolev=0
    iraf.ellipse("".join(input), output="test", interac="no",Stdout="ellip", \
                 Stderr="err")
    iraf.tprint("test.tab", prparam="no", prdata="yes", pwidth=80, plength=0,
                showrow="no", orig_row="no", showhdr="no", showunits="no",
                columns="SMA, INTENS, INT_ERR, MAG, MAG_LERR, MAG_UERR, TFLUX_E",
                rows="-", option="plain", align="yes", sp_col="", lgroup=0,
                Stdout=output)
    for myfile in ['ellip','err','test.tab']:
        if os.access(myfile,os.F_OK):
            os.remove(myfile)
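A usage sketch for run_elli with placeholder values (the image name, centroid, ellipticity, position angle, and semi-major axis below are all hypothetical); c.mag_zero must be defined in the calling module's configuration before the call.

# All values below are placeholders for illustration only.
run_elli('galaxy_cutout.fits',      # input image
         'elli_galaxy_cutout.txt',  # output table written by tprint
         xcntr=150.0, ycntr=150.0,  # initial centre of the isophotes
         eg=0.3, pa=45.0,           # initial ellipticity and position angle
         sma=20.0)                  # characteristic semi-major axis in pixels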
Code example #50
0
def scale_ota(img, ota, scale):
    """Divide the background-subtracted OTA image by a scale factor and write the result to the scaled-image directory."""
    image = odi.bgsubpath + 'bgsub_' + ota + '.' + img.stem()
    imout = odi.scaledpath + 'scaled_' + ota + '.' + img.stem()
    iraf.unlearn(iraf.imutil.imarith)
    iraf.imutil.imarith.setParam('operand1', image)
    iraf.imutil.imarith.setParam('op', '/')
    iraf.imutil.imarith.setParam('operand2', scale)
    iraf.imutil.imarith.setParam('result', imout)
    iraf.imutil.imarith.setParam('verbose', 'yes')
    iraf.imutil.imarith(mode='h')
    return
Code example #51
0
File: __init__.py  Project: griffin-h/lcogtgemini
def calibrate(scifiles, extfile, observatory):
    for f in scifiles:
        redorblue = getredorblue(f)
        iraf.unlearn(iraf.gscalibrate)
        iraf.gscalibrate('et' + f[:-4] + '.fits',
                         sfunc='sens' + redorblue + '.fits', fl_ext=True, fl_vardq=dodq,
                         extinction=extfile, observatory=observatory)
        
        if os.path.exists('cet' + f[:-4] + '.fits'):
            iraf.unlearn(iraf.splot)
            iraf.splot('cet' + f.replace('.txt', '.fits') + '[sci]')  # just to check
Code example #52
0
File: reduce.py  Project: svalenti/lcogtgemini
def calibrate(scifiles, extfile, observatory):
    for f in scifiles:
        redorblue = getredorblue(f)
        iraf.unlearn(iraf.gscalibrate)
        iraf.gscalibrate('et' + f[:-4] + '.fits',
                         sfunc='sens' + redorblue + '.fits', fl_ext=True,
                         extinction=extfile, observatory=observatory)
        
        if os.path.exists('cet' + f[:-4] + '.fits'):
            iraf.unlearn(iraf.splot)
            iraf.splot('cet' + f.replace('.txt', '.fits') + '[sci]')  # just to check
Code example #53
0
File: odi_illcor.py  Project: oboberg/odi-tools
def get_gaps_rep(img, ota):
    """
    Create a numpy array mask of the gaps in a reprojected ota.

    Parameters
    ----------
    img : str
        Name of image
    ota : str
        Name of OTA
    Returns
    -------
    gaps_mask : numpy array
        A numpy array marking the gap locations on the OTA.
    """

    image = odi.reprojpath + 'reproj_' + ota + '.' + img.stem()
    hdulist = odi.fits.open(image)
    hdu = hdulist[0]
    # plt.imshow(hdu.data, origin='lower', cmap='Greys_r', vmin=-10., vmax=500.)
    # plt.show()
    gaps_mask1 = (hdu.data < 1.0).astype(int)
    selem = np.ones((5, 5))  # dilate using a 5x5 box
    gaps_mask = odi.binary_dilation(gaps_mask1, selem)
    hdulist.close()

    # also update the bad pixel mask for the image to make sure the cell gaps are masked
    # this is necessary for the final imcombine
    mask_name = odi.bppath + 'reproj_mask_' + ota + '.' + img.stem()
    BPM = mask_name.replace('fits', 'pl')
    if not os.path.isfile(BPM):
        # mask,gaps = mask_ota(img,ota)
        hdu = odi.fits.PrimaryHDU(gaps_mask.astype(float))
        if not os.path.isfile(mask_name):
            hdu.writeto(mask_name, clobber=True)
    if not os.path.isfile(mask_name.replace('fits', 'pl')):
        iraf.unlearn(iraf.imutil.imcopy)
        iraf.imutil.imcopy.setParam('input', mask_name)
        iraf.imutil.imcopy.setParam('output', mask_name.replace('fits', 'pl'))
        iraf.imutil.imcopy.setParam('verbose', 'no')
        iraf.imutil.imcopy(mode='h')
    # if os.path.isfile(mask_name): # we don't need to keep the reproj fits mask, it takes up a ton of space
    #     iraf.imutil.imdelete(mask_name, verify='no', mode='h')
    iraf.unlearn(iraf.imutil.hedit)
    iraf.imutil.hedit.setParam('images', image)
    iraf.imutil.hedit.setParam('fields', 'BPM')
    iraf.imutil.hedit.setParam('value', BPM)
    iraf.imutil.hedit.setParam('add', 'yes')
    iraf.imutil.hedit.setParam('addonly', 'no')
    iraf.imutil.hedit.setParam('verify', 'no')
    iraf.imutil.hedit.setParam('update', 'yes')
    iraf.imutil.hedit(mode='h')

    return gaps_mask
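The masking step itself is plain numpy plus scipy; a standalone sketch (assuming odi.binary_dilation wraps scipy.ndimage.binary_dilation, which is an assumption, and with a hypothetical helper name) looks like this.

import numpy as np
from astropy.io import fits
from scipy.ndimage import binary_dilation

def gap_mask_from_image(filename, threshold=1.0, grow=5):
    # Flag pixels below `threshold` (the cell gaps) and grow the mask by a
    # `grow` x `grow` box so the gap edges are covered as well.
    data = fits.getdata(filename)
    mask = (data < threshold).astype(int)
    return binary_dilation(mask, structure=np.ones((grow, grow))).astype(int)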
Code example #54
0
File: detect_stars.py  Project: georgezhou/hsfu23
def set_findpars():
    iraf.unlearn(iraf.findpars)
    iraf.findpars.setParam("threshold",5.0,check=1,exact=1)
    iraf.findpars.setParam("nsigma",2.0,check=1,exact=1)
    iraf.findpars.setParam("ratio",1.0,check=1,exact=1)
    iraf.findpars.setParam("theta",0.0,check=1,exact=1)
    iraf.findpars.setParam("sharplo",0.0,check=1,exact=1)
    iraf.findpars.setParam("sharphi",1.0,check=1,exact=1)
    iraf.findpars.setParam("roundlo",-1.0,check=1,exact=1)
    iraf.findpars.setParam("roundhi",1.0,check=1,exact=1)
    iraf.findpars.setParam("mkdetections",0,check=1,exact=1)
Code example #55
0
File: rusalt_raw.py  Project: Youssef15015/RuSalt-1
def mosaic(fs=None):

    iraf.cd('work')
    # If the file list is not given, grab the default files
    if fs is None:
        fs = glob('flts/*.fits')
    # Abort if there are no files
    if len(fs) == 0:
        print "WARNING: No flat-fielded images to mosaic."
        iraf.cd('..')
        return

    if not os.path.exists('mos'):
        os.mkdir('mos')

    # Get the images to work with
    ims, gas = get_scis_and_arcs(fs)

    for i, f in enumerate(ims):
        ga = gas[i]
        fname = f.split('/')[1]
        typestr = fname[:3]
        # by our naming convention, imnum should be the last 4 characters
        # before the '.fits'
        imnum = fname[-9:-5]
        outname = 'mos/' + typestr
        outname += '%05.2fmos%04i.fits' % (float(ga), int(imnum))
        # prepare to run saltmosaic
        iraf.unlearn(iraf.saltmosaic)
        iraf.flpr()
        iraf.saltmosaic(images=f,
                        outimages=outname,
                        outpref='',
                        geomfile=pysaltpath + '/data/rss/RSSgeom.dat',
                        clobber=True,
                        mode='h')

        # Make a bad pixel mask marking where there is no data.
        h = pyfits.open(outname, 'update')
        maskim = h[1].data.copy()
        maskim[:, :] = 0.0
        maskim[abs(h[1].data) < 1e-5] = 1
        imhdu = pyfits.ImageHDU(maskim)

        h.append(imhdu)
        h[1].header['BPMEXT'] = 2
        h[2].header['EXTNAME'] = 'BPM'
        h[2].header['CD2_2'] = 1
        h.flush()
        h.close()

    iraf.cd('..')
Code example #56
0
File: __init__.py  Project: griffin-h/lcogtgemini
def speccombine(fs, outfile):
    nsteps = 8001
    lamgrid = np.linspace(3000.0, 11000.0, nsteps)

    nfs = len(fs)
    # for each aperture
    # get all of the science images
    specs = np.zeros((nfs, nsteps))
    specerrs = np.zeros((nfs, nsteps))
    for i, f in enumerate(fs):
        hdu = fits.open(f)
        lam = fitshdr_to_wave(hdu[0].header.copy())

        # interpolate each spectrum onto a common wavelength scale

        specs[i] = np.interp(lamgrid, lam, hdu[0].data,
                             left=0.0, right=0.0)
        # Also calculate the errors. Right now we assume that the variances
        # interpolate linearly. This is not strictly correct but it should be
        # close. Also we don't include terms in the variance for the
        # uncertainty in the wavelength solution.
        specerrs[i] = 0.1 * specs[i]

    # minimize the chi^2 given free parameters are multiplicative factors
    # We could use linear or quadratic, but for now assume constant
    # Assume 3 chips for now
    p0 = np.ones(nfs / 3)

    results = optimize.minimize(combine_spec_chi2, p0,
                                args=(lamgrid, specs, specerrs),
                                method='Nelder-Mead',
                                options={'maxfev': 1e5, 'maxiter': 1e5, 'ftol':1e-5})

    # write the best fit parameters into the headers of the files
    # Dump the list of spectra into a string that iraf can handle
    iraf_filelist = str(fs).replace('[', '').replace(']', '').replace("'", '') #.replace(',', '[SCI],')
    #iraf_filelist += '[SCI]'

    # write the best fit results into a file
    lines = []

    for p in np.repeat(results['x'], 3):
        lines.append('%f\n' % (1.0 / p))
    f = open('scales.dat', 'w')
    f.writelines(lines)
    f.close()
    # run scombine after multiplying the spectra by the best fit parameters
    if os.path.exists(outfile):
        os.remove(outfile)
    iraf.unlearn(iraf.scombine)
    iraf.scombine(iraf_filelist, outfile, scale='@scales.dat',
                  reject='avsigclip', lthreshold='INDEF', w1=bluecut)
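combine_spec_chi2 is referenced above but not included in this snippet; a minimal sketch of such an objective function (an assumption, not the original lcogtgemini implementation) scales each spectrum by its exposure's factor and sums the chi^2 of the scaled spectra about their mean.

import numpy as np

def combine_spec_chi2(p, lamgrid, specs, specerrs):
    # One multiplicative factor per exposure, shared by its 3 chips,
    # matching the p0 = np.ones(nfs / 3) starting guess above.
    scales = np.repeat(p, 3)[:, np.newaxis]
    scaled = specs * scales
    scaled_err = specerrs * scales
    # lamgrid is unused here but kept to match the call signature.
    mean_spec = scaled.mean(axis=0)
    good = scaled_err > 0            # ignore pixels with no coverage
    return (((scaled - mean_spec) / scaled_err) ** 2)[good].sum()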
Code example #57
0
File: rusaltD.py  Project: Youssef15015/rusalt
def background(fs=None):
    iraf.cd('work')
    # Get rectified science images
    if fs is None:
        fs = glob('nrm/sci*nrm*.fits')
    if len(fs) == 0:
        print "WARNING: No rectified images for background-subtraction."
        iraf.cd('..')
        return

    if not os.path.exists('bkg'):
        os.mkdir('bkg')

    for f in fs:
        print("Subtracting background for %s" % f)
        # Make sure dispaxis is set correctly
        pyfits.setval(f, 'DISPAXIS', value=1)

        # the outfile name is very similar, just change folder prefix and
        # 3-char stage substring
        outfile = f.replace('nrm','bkg')
        # We are going to use fit1d instead of the background task
        # Go look at the code for the background task: it is literally a wrapper for fit1d,
        # but it removes the BPM option. Annoying.
        iraf.unlearn(iraf.fit1d)
        iraf.fit1d(input=f + '[SCI]', output='tmpbkg.fits', bpm=f + '[BPM]',
                   type='difference', sample='52:949', axis=2,
                   interactive='no', naverage='1', function='legendre',
                   order=5, low_reject=1.0, high_reject=1.0, niterate=5,
                   grow=0.0, mode='hl')

        # Copy the background subtracted frame into the rectified image
        # structure.
        # Save the sky spectrum as extension 3
        hdutmp = pyfits.open('tmpbkg.fits')
        hdu = pyfits.open(f)
        skydata = hdu[1].data - hdutmp[0].data
        hdu[1].data[:, :] = hdutmp[0].data[:, :]

        hdu.append(pyfits.ImageHDU(skydata))
        hdu[3].header['EXTNAME'] = 'SKY'
        hdu[3].data[hdu['BPM'].data == 1] = 0.0

        # Add back in the median sky level for things like apall and lacosmicx
        hdu[1].data[:, :] += np.median(skydata)
        hdu[1].data[hdu['BPM'].data == 1] = 0.0
        hdutmp.close()
        hdu.writeto(outfile, clobber=True)  # saving the updated file
        # (data changed)
        os.remove('tmpbkg.fits')
    iraf.cd('..')
Code example #58
0
File: __init__.py  Project: griffin-h/lcogtgemini
def makemasterflat(flatfiles, rawpath, plot=True):
    # normalize the flat fields
    for f in flatfiles:
        binning = get_binning(f, rawpath)
        # Use IRAF to get put the data in the right format and subtract the
        # bias
        # This will currently break if multiple flats are used for a single setting
        iraf.unlearn(iraf.gsreduce)
        if dobias:
            biasfile = "bias{binning}".format(binning=binning)
        else:
            biasfile = ''
        iraf.gsreduce('@' + f, outimages = f[:-4]+'.mef.fits',rawpath=rawpath, fl_bias=dobias,
                      bias=biasfile, fl_over=dooverscan, fl_flat=False, fl_gmosaic=False,
                      fl_fixpix=False, fl_gsappwave=False, fl_cut=False, fl_title=False,
                      fl_oversize=False, fl_vardq=dodq)

        if do_qecorr:
            # Renormalize the chips to remove the discrete jump in the
            # sensitivity due to differences in the QE for different chips
            iraf.unlearn(iraf.gqecorr)

            iraf.gqecorr(f[:-4]+'.mef', outimages=f[:-4]+'.qe.fits', fl_keep=True, fl_correct=True,
                         refimages=f[:-4].replace('flat', 'arc.arc.fits'),
                         corrimages=f[:-9] +'.qe.fits', verbose=True, fl_vardq=dodq)

            iraf.unlearn(iraf.gmosaic)
            iraf.gmosaic(f[:-4]+'.qe.fits', outimages=f[:-4]+'.mos.fits', fl_vardq=dodq, fl_clean=False)
        else:
            iraf.unlearn(iraf.gmosaic)
            iraf.gmosaic(f[:-4]+'.mef.fits', outimages=f[:-4]+'.mos.fits', fl_vardq=dodq, fl_clean=False)

        flat_hdu = fits.open(f[:-4] + '.mos.fits')

        data = np.median(flat_hdu['SCI'].data, axis=0)
        chip_edges = get_chipedges(data)

        x = np.arange(len(data), dtype=np.float)
        x /= x.max()

        y = data / np.median(data)

        fitme_x = x[chip_edges[0][0]:chip_edges[0][1]]
        fitme_x = np.append(fitme_x, x[chip_edges[1][0]:chip_edges[1][1]])
        fitme_x = np.append(fitme_x, x[chip_edges[2][0]:chip_edges[2][1]])

        fitme_y = y[chip_edges[0][0]:chip_edges[0][1]]
        fitme_y = np.append(fitme_y, y[chip_edges[1][0]:chip_edges[1][1]])
        fitme_y = np.append(fitme_y, y[chip_edges[2][0]:chip_edges[2][1]])

        fit = pfm.pffit(fitme_x, fitme_y, 21, 7, robust=True,
                    M=sm.robust.norms.AndrewWave())
        if plot:
            pyplot.ion()
            pyplot.clf()
            pyplot.plot(x, y)
            pyplot.plot(x, pfm.pfcalc(fit, x))
            _junk = raw_input('Press enter to continue')
        flat_hdu['SCI'].data /= pfm.pfcalc(fit, x) * np.median(data)
        flat_hdu.writeto(f[:-4] + '.fits')
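get_chipedges is not part of this snippet; a hypothetical stand-in (an assumption about its behaviour, not the lcogtgemini implementation) would locate the illuminated chip regions in the 1-D cut of the flat and trim a margin off each end of every region.

import numpy as np

def get_chipedges_sketch(data, trim=25):
    # Hypothetical stand-in for get_chipedges(): find contiguous runs of
    # illuminated pixels (well above zero) and trim `trim` pixels per edge.
    lit = data > 0.1 * np.median(data[data > 0])
    edges, start = [], None
    for i, ok in enumerate(lit):
        if ok and start is None:
            start = i
        elif not ok and start is not None:
            edges.append((start + trim, i - trim))
            start = None
    if start is not None:                  # run extends to the array edge
        edges.append((start + trim, len(lit) - trim))
    return edges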
Code example #59
0
def wavelength_calibration(targetdir):

    """
    Does wavelength calibration.

    Writes every fit to database so make sure it's using the correct one.

    This needs to be run in object directory for database

    """

    print 'Target directory is ' + targetdir
    print 'Doing wavelength calibration...'

    if os.getcwd() != targetdir:

        print 'Warning: current working directory must be target directory!'

        return None

    iraf.noao(_doprint=0)
    iraf.onedspec(_doprint=0)

    iraf.unlearn('identify')

    iraf.identify.setParam('images','aimcomb.fits')
    iraf.identify.setParam('coordli','/home/lc585/Dropbox/IoA/WHT_Proposal_2015a/argon+xenon.dat')
    iraf.identify.setParam('niterat',1)
    iraf.identify.setParam('function','spline3')
    iraf.identify.setParam('order',3)
    iraf.identify.setParam('zwidth',200.0) #  Zoom graph width in user units
    iraf.identify.setParam('database','database')

    iraf.identify()

    # Update fits header

    print '\n' '\n' '\n'
    print 'Updating fits header...'

    iraf.hedit.setParam('images','imcomb.ms.fits')
    iraf.hedit.setParam('fields','REFSPEC1')
    iraf.hedit.setParam('value','aimcomb.fits') # should be wavelength calibrated?
    iraf.hedit.setParam('add','yes')
    iraf.hedit.setParam('verify','yes')
    iraf.hedit.setParam('show','yes')

    iraf.hedit()

    return None
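A short usage sketch: because of the working-directory check above, the call is made from inside the target's reduction directory.

import os
# Run from inside the target's reduction directory so the 'database'
# files written by identify end up in the right place.
wavelength_calibration(os.getcwd())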
Code example #60
0
def run_fitparams(weight=True):
    if weight:
        weighting = 'photometric'
    else:
        weighting = 'uniform'

    ir.delete('standards.transParams')
    ir.digiphot()
    ir.photcal()
    ir.unlearn('fitparams')
    ir.fitparams('standards.instMag',
                 'photcal_ukirt_faint.dat',
                 'standards.config',
                 'standards.transParams',
                 weighting=weighting)