Example No. 1
import numpy as np  # used by the commented-out normalisations below
import pyfits
from shutil import copyfile


def main():
    fn_inputmodel = 'figs/biggerr/egrater_0.600000,1.00000,4,-3.50000,112.400,0,0,112.400,84.6000,70,2,0_inputcolsc.fits'
    fn_psfsubmodel = 'figs/biggerr/egrater_0.600000,1.00000,4,-3.50000,112.400,0,0,112.400,84.6000,70,2,0_psfsubcol.fits'
    fn_realdata = 'figs/biggerr/biggerr,2,6,1_collapsed.fits'

    print "Input model:", fn_inputmodel
    print "PSF subtracted model:", fn_psfsubmodel

    inputmodel = pyfits.getdata(fn_inputmodel)
    #inputmodel /= np.nansum(inputmodel)

    psfsubmodel = pyfits.getdata(fn_psfsubmodel)
    #psfsubmodel /= np.nansum(psfsubmodel)

    #pdb.set_trace()

    atten = psfsubmodel / inputmodel
    atten[abs(atten) > 1] = 0
    outname = 'figs/biggerr/attenuationmap.fits'

    pyfits.writeto(outname, atten, clobber=True)
    print "Wrote:", outname

    fn_attencorrected = fn_realdata[:-5] + '_atten_corr.fits'
    copyfile(fn_realdata, fn_attencorrected)  #duplicate file with new name

    realdisk = pyfits.getdata(fn_realdata)
    realdisk_corrected = realdisk / atten
    pyfits.update(fn_attencorrected, realdisk_corrected, ext=1)
    print "Wrote:", fn_attencorrected
Example No. 2
import numpy as np
import pyfits


def writebigfits(output_name, data_in, xsize, ysize, out_size, hdr_in='none'):
    data = np.zeros((ysize, xsize), dtype=np.float32)
    hdu = pyfits.PrimaryHDU(data=data)
    header = hdu.header

    # Reserve header space: four 2880-byte blocks (36 cards each), minus the
    # END card
    if hdr_in == 'none':
        headlen = (36*4)-1
    else:
        headlen = len(hdr_in)

    while len(header) < headlen:
        header.append()

    header['NAXIS1'] = xsize
    header['NAXIS2'] = ysize
    header.tofile(output_name)
    # Pad the file on disk out to the full image size without ever holding
    # the full array in memory
    with open(output_name, 'rb+') as fobj:
        fobj.seek(out_size - 1)
        fobj.write('\0')

    if hdr_in == 'none':
        pyfits.update(output_name, data_in, ignore_missing_end=True)
    else:
        pyfits.update(output_name, data_in, header=hdr_in)

    print str(output_name)+' successfully written to file!'
    
    return
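
The (36*4)-1 padding above reserves four 2880-byte FITS header blocks, and seeking past the end of the freshly written header lets the OS allocate the image without the array ever being held in memory. A hypothetical call (filename and sizes are illustrative only):

import numpy as np

nx, ny = 4000, 4000
data_in = np.ones((ny, nx), dtype=np.float32)
out_size = 4 * 2880 + nx * ny * 4   # reserved header blocks + float32 pixels
writebigfits('big.fits', data_in, nx, ny, out_size)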
Example No. 3
import numpy as np
import pyfits


def modify_cut_radius(run_dir, cut_radius, center):
    cut_radius = float(cut_radius)
    print 'Removing particles outside %1.1f kpc' % cut_radius
    initial = run_dir + '/input/initial.fits'
    pd, h = pyfits.getdata(initial, extname='PARTICLEDATA', header=True)
    pos = pd['position']
    rad = np.sqrt(np.sum((pos - center)**2.0, axis=1))
    idx = rad < cut_radius
    pd = pd[idx]
    pyfits.update(initial, pd, header=h, extname='PARTICLEDATA')
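
A hypothetical call, assuming run_dir holds input/initial.fits with a PARTICLEDATA extension; it keeps only particles within 50 kpc of the given center:

import numpy as np

modify_cut_radius('runs/sim01', 50.0, np.array([0.0, 0.0, 0.0]))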
Example No. 4
import pyfits as pf


def headerReplace(sourceImage, destImage):
    sourceHduList = pf.open(sourceImage)
    sourceHeader = sourceHduList[0].header

    # Read the destination data, then close the handle before pf.update
    # rewrites the file
    destHduList = pf.open(destImage)
    destData = destHduList[0].data
    destHduList.close()

    pf.update(destImage, destData, sourceHeader)

    sourceHduList.close()

    return
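
An alternative sketch that edits the destination in place rather than rewriting the whole file (dict-style header access is assumed to be available in this pyfits version):

def headerReplaceInPlace(sourceImage, destImage):
    # Copy non-structural cards onto the destination primary header and
    # let close() flush the change back to disk
    sourceHeader = pf.getheader(sourceImage)
    structural = ('SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'EXTEND')
    destHduList = pf.open(destImage, mode='update')
    for key, value in sourceHeader.items():
        if key not in structural:
            destHduList[0].header[key] = value
    destHduList.close()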
Example No. 5
import numpy as np
import pyfits


def modify_ages(run_dir, min_age=None, max_age=None):
    if min_age is None: min_age = -1.0
    if max_age is None: max_age = np.inf
    min_age = float(min_age)
    max_age = float(max_age)
    print 'Updating particle min ages %1.1e years' % min_age
    print 'Updating particle max ages %1.1e years' % max_age
    initial = run_dir + '/input/initial.fits'
    pd, h = pyfits.getdata(initial, extname='PARTICLEDATA', header=True)
    pd = pd[pd['age'] > min_age]
    pd = pd[pd['age'] < max_age]
    pyfits.update(initial, pd, header=h, extname='PARTICLEDATA')
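
A hypothetical call keeping only particles with ages between 10 Myr and 1 Gyr (ages are in years, as in the prints above):

modify_ages('runs/sim01', min_age=1e7, max_age=1e9)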
Example No. 6
import numpy as np
import pyfits


def modify_metallicity(run_dir, fmassraw):
    fmassraw = np.array([float(x) for x in fmassraw.split(',')])
    age_intervals = fmassraw[::2]
    fmass = fmassraw[1::2]
    assert fmass.shape[0] == age_intervals.shape[0] - 1
    initial = run_dir + '/input/initial.fits'
    pd, h = pyfits.getdata(initial, extname='PARTICLEDATA', header=True)
    pd = np.array(pd)
    for agea, ageb, fm in zip(age_intervals[:-1], age_intervals[1:], fmass):
        agea, ageb = float(agea), float(ageb)
        idx = pd['age'] >= agea
        idx &= pd['age'] <= ageb
        z = pd['metallicity']
        z[idx] *= float(fm)
        pd['metallicity'] = z
        out = (np.sum(idx), agea / 1e6, ageb / 1e6, fm)
        print 'modified %i particles %1.1eMyr-%1.1eMyr by %1.1f ' % out
    pyfits.update(initial, pd, header=h, extname='PARTICLEDATA')
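
The fmassraw string interleaves age boundaries and multipliers, 'a0,f0,a1,f1,a2', so each factor applies to the particles whose ages fall between its neighbouring boundaries. A hypothetical call doubling the metallicity of particles younger than 10 Myr and halving it between 10 Myr and 10 Gyr:

modify_metallicity('runs/sim01', '0,2.0,1e7,0.5,1e10')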
Example No. 7
import pyfits


def UpdatePsfRaDec(element):
    """The function which will update the psf header if the psf files
       are in the specified format"""
    try:
        ra1 = float(str(element)[4:6])
        ra2 = float(str(element)[6:8])
        ra3 = float(str(element)[8:10]) + float(str(element)[10]) / 10.0
        dec1 = float(str(element)[11:-10])
        dec2 = float(str(element)[-10:-8])
        dec3 = float(str(element)[-8:-6]) + float(str(element)[-6]) / 10.0
        ra = HMSToDeg(ra1, ra2, ra3)
        dec = DMSToDeg(dec1, dec2, dec3)
        data, header = pyfits.getdata(element, header=True)
        header.update('RA_TARG', ra, "RA")
        header.update('DEC_TARG', dec, "DEC")
        pyfits.update(element, data, header)
    except Exception:
        # Skip files whose names don't match the expected RA/Dec format
        pass
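
HMSToDeg and DMSToDeg are external helpers converting sexagesimal coordinates to decimal degrees; a minimal sketch of what they presumably do (the declination sign handling is an assumption):

def HMSToDeg(h, m, s):
    # Right ascension: hours, minutes, seconds -> decimal degrees
    return (h + m / 60.0 + s / 3600.0) * 15.0

def DMSToDeg(d, m, s):
    # Declination: degrees, arcminutes, arcseconds -> decimal degrees
    sign = -1.0 if d < 0 else 1.0
    return sign * (abs(d) + m / 60.0 + s / 3600.0)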
Example No. 8
import numpy as np
import pyfits


def modify_ism_temp(run_dir, fism_temp):
    fism_temp = np.array([float(x) for x in fism_temp.split(',')])
    temp_intervals = fism_temp[::2]
    fts = fism_temp[1::2]
    assert fts.shape[0] == temp_intervals.shape[0] - 1
    initial = run_dir + '/input/initial.fits'
    gd, h = pyfits.getdata(initial, extname='GRIDDATA', header=True)
    gd = np.array(gd)
    for ta, tb, ft in zip(temp_intervals[:-1], temp_intervals[1:], fts):
        ta, tb = float(ta), float(tb)
        ism_temp = gd['gas_temp_m'] / gd['mass_gas']
        idx = ism_temp > ta
        idx &= ism_temp < tb
        ism_temp_m = gd['gas_temp_m']
        ism_temp_m[idx] *= ft
        gd['gas_temp_m'] = ism_temp_m
        out = (np.sum(idx), ta, tb, ft)
        print 'modified %i cells %1.1fK-%1.1fK by %1.1f ' % out
    pyfits.update(initial, gd, header=h, extname='GRIDDATA')
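
The fism_temp string uses the same interleaved layout, with temperatures in Kelvin. A hypothetical call halving the mass-weighted gas temperature of cells between 1e4 K and 1e7 K:

modify_ism_temp('runs/sim01', '0,1.0,1e4,0.5,1e7')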
Example No. 9
def autopipestack(pipevar=inpipevar):
    """
    NAME:
        autopipestack
    PURPOSE:
        Does zeropoint correction on each individual frame using sextractor and
        get_SEDs. Creates a flux scale (newflxsc) for each frame from its offset
        from the median zeropoint, then uses that flux scale to stack the images
        with SWarp (frames with bad zeropoint fits or bad newflxsc values are
        moved to badzptfit/ and badflxsc/) and calculates the absolute zeropoint
        correction of the coadd. Saves the zeropoint plot as zpt_(FILTER).ps
    OPTIONAL KEYWORDS:
        pipevar  - input pipeline parameters (typically set in ratautoproc.pro, 
                   but can be set to default)
    EXAMPLE:
        autopipestack(pipevar=inpipevar)
    DEPENDENCIES:
        SWarp, get_SEDs, calc_zpt, findsexobj (sextractor)
    """
  
    print 'STACK'
    
    # 'export' in a subshell does not persist; set the variable for this
    # process instead (fix for CDSCLIENT timeouts)
    os.environ['CDSCLIENT'] = 'http'
    
    qtcmd = 'True'; quiet = 1
    if pipevar['verbose'] > 0: quiet = 0; qtcmd = 'False'
    
    # If swarp configuration file ('default.swarp') does not exist, move swarp 
    # output default configuration file
    if not os.path.isfile('default.swarp'): 
        os.system(pipevar['swarpcommand']+' -d > default.swarp')
    
    # Find files that have had astrometry performed on them, stop program if don't exist
    files = glob.glob(pipevar['imworkingdir'] + 'a*sfp' + pipevar['prefix'] + '*.fits')
    print pipevar['imworkingdir'] + 'a*sfp' + pipevar['prefix'] + '*.fits'
    print files
    if len(files) == 0:
        print 'Did not find any files! Check your data directory path!'
        return

    filetargs = []; fileexpos = []; filefilts = []; fileairmv = [] 
    filesatvs = []; filearms1 = []; filearms2 = []; filetime  = []
    
    # Grab information in the headers of astrometry corrected file and save to array
    for i,file in enumerate(files):
        head = pf.getheader(file)
        obstime = Time(head['DATE-OBS'], format='isot', scale='utc')
        
        
        # Strip target name of whitespace
        filetargs += [re.sub(r'\s+', '', head['TARGNAME'])]; 
        fileexpos += [head['EXPTIME']]; filefilts += [head['FILTER']]
        fileairmv += [head['AIRMASS']]; filesatvs += [head['SATURATE']]
        filearms1 += [head['ASTRRMS1']]; filearms2 += [head['ASTRRMS2']]
        filetime  += [obstime.jd]
    
    files     = np.array(files); filetargs = np.array(filetargs)
    fileexpos = np.array(fileexpos); filefilts = np.array(filefilts)
    filesatvs = np.array(filesatvs); fileairmv = np.array(fileairmv)
    filearms1 = np.array(filearms1); filearms2 = np.array(filearms2)
    filetime  = np.array(filetime)
    targets = set(filetargs)
    
    # Dictionary of corresponding columns for catalog file
    catdict = {'u': 2, 'g': 3, 'r': 4, 'i': 5, 'z': 6, 'y': 7, 
               'B': 8, 'V': 9, 'R':10, 'I':11, 'J':12, 'H':13, 'K':14,
               'ue':15,'ge':16,'re':17,'ie':18,'ze':19,'ye':20,
               'Be':21,'Ve':22,'Re':23,'Ie':24,'Je':25,'He':26,'Ke':27,'mode':28}
    
    # Finds files with same target and the filters associated with this target
    for targ in targets:

        thistarget = np.where(filetargs == targ)
        if len(thistarget[0]) == 0: continue
        
        thistargetfilts = set(filefilts[thistarget])
        
        # Find files that have the same target and same filter and store information 
        # on the exposure times and airmass. Only use good Scamp astrometric fit files
        for filter in thistargetfilts:
            stacki = (filetargs == targ) & (filefilts == filter) &\
                     (filearms1 < 2.0e-4) & (filearms1 > 5.0e-6) &\
                     (filearms2 < 2.0e-4) & (filearms2 > 5.0e-6)
                                            
            if sum(stacki) == 0: continue

            stacklist = files[stacki]
            stackexps = fileexpos[stacki]
            stackairm = fileairmv[stacki]
            stacktime = filetime[stacki]
            
            medexp = np.median(stackexps)
            medair = np.median(stackairm)
            minair = min(stackairm)
            maxair = max(stackairm)
            totexp = sum(stackexps)
            nstack = len(stacklist)
            firsttime = Time(stacktime[0], format='jd', scale='utc').isot
            lasttime  = Time(stacktime[-1], format='jd', scale='utc').isot
            medtime   = Time(np.median(stacktime), format='jd', scale='utc').isot
            
            textslist = ' '.join(stacklist)
            
            zpts = []
            
            # Find stars for each individual frame and try to find matches with coadded 
            # frame with each frame optimized with PSF size
            for sfile in stacklist:
                head = pf.getheader(sfile)
                ipixscl = head['PIXSCALE']
                
                apd.findsexobj(sfile, 3.0, pipevar,pix=ipixscl,aperture=20.0, quiet=quiet)
                starfile = sfile + '.stars'
                
                svars = np.loadtxt(starfile, unpack=True)
                xim  = svars[1,:]
                yim  = svars[2,:]
                mag  = svars[3,:]
                mage = svars[4,:]
                flag = svars[5,:]
                elon = svars[8,:]
                
                # astropy does not like SWarp PV keywords or unicode, temporarily delete
                for key in head.keys():
                    if any(x in key for x in ['PV1_', 'PV2_', 'COMMENT', 'HISTORY']):
                        del head[key]
                        
                w = wcs.WCS(head)
                wrd = w.all_pix2world(np.transpose([xim, yim]), 0)                
                imfile  = sfile + '.im'
                catfile = sfile + '.cat'
                
                # Save stars from image
                np.savetxt(imfile, np.transpose([wrd[:,0],wrd[:,1],mag]))
                
                # Filter name correction:
                if filter == 'Z' or filter == 'Y': filter = filter.lower()
                
                if 'SDSS' in filter:
                    filter = filter[-1].lower()
                
                # Create catalog star file 
                # (python get_SEDs.py imfile filter catfile USNOB_THRESH alloptstars)
                sedcmd = 'python ' + pipevar['getsedcommand'] + ' ' + imfile + ' ' +\
                         filter + ' ' + catfile + " 15 True "+ qtcmd
                
                if pipevar['verbose'] > 0: print sedcmd
                os.system(sedcmd)
                
                if not os.path.isfile(catfile):
                    zpts += [float('NaN')]
                    continue
                
                # Read in catalog file
                cvars = np.loadtxt(catfile, unpack=True)
                
                refmag = cvars[catdict[filter],:]
                mode   = cvars[catdict['mode'],:]
                
                # Find catalog filter values and only cutoff values of actual detections
                goodind = (mode != -1) & (refmag < 90.0) & (flag < 8) & (elon <= 1.5)
                
                refmag = refmag[goodind]
                obsmag = mag[goodind]
                obserr = mage[goodind]
                obswts = np.zeros(len(obserr))
                obskpm = np.zeros(len(obsmag))
                
                # Store magnitudes and weights (with minimum magnitude error of 0.01)
                for i in np.arange(len(obsmag)):
                    if obserr[i] < 0.1:
                        obskpm[i] = obsmag[i]
                        obswts[i] = 1.0/(max(obserr[i], 0.01)**2)

                zpt, scats, rmss = apd.calc_zpt(np.array([refmag]), np.array([obskpm]), 
                                                np.array([obswts]), sigma=3.0)
                                
                # Reload because we had to remove distortion parameters before
                head = pf.getheader(sfile)
                data = pf.getdata(sfile)
                head['ABSZPT']   = (zpt[0] + 25.0, 'Relative zeropoint from calc_zpt')
                head['ABSZPTSC'] = (scats[0], 'Robust scatter of relative zeropoint')
                head['ABSZPRMS'] = (rmss[0], 'RMS of relative zeropoint')

                pf.update(sfile, data, head)
                zpts += zpt             
                
            # Move files with bad zeropoint calculations to folder 'badzptfit' 
            # and do not use those frames
            zpts = np.array(zpts)
            goodframes = np.isfinite(zpts)
            badframes  = ~np.isfinite(zpts)
            
            if len(zpts[badframes]) !=0:
                if not os.path.exists(pipevar['imworkingdir']+'/badzptfit'):
                    os.makedirs(pipevar['imworkingdir']+'/badzptfit')
                for file in stacklist[badframes]:
                    os.system('mv ' + file + ' ' +  pipevar['imworkingdir']+'/badzptfit/')
                zpts = zpts[goodframes]
                newstack = stacklist[goodframes]
            else:
                newstack = stacklist
            
            badnewflxsc = []
            # Add relative zeropoint values to headers and calculate flux scale. 
            # Remove unphysical fluxscale files
            medzp = np.median(zpts)
            for i,file in enumerate(newstack):
                head = pf.getheader(file)
                head['NEWFLXSC'] = (1.0/(10.0**( (zpts[i] - medzp)/2.5 )), 
                                    'Flux scaling based on median zp') 
                
                if 1.0/(10.0**( (zpts[i] - medzp)/2.5 )) < 0.1:
                    badnewflxsc += [file]
            
                data = pf.getdata(file)
                pf.update(file, data, head)
            
            removedframes = []
            # Removes files that have bad newflxsc values and removes from stack list
            if len(badnewflxsc) > 0:
                if not os.path.exists(pipevar['imworkingdir']+'/badflxsc'):
                    os.makedirs(pipevar['imworkingdir']+'/badflxsc')
                    
                for badfile in badnewflxsc:
                    os.system('mv ' + badfile + ' ' + pipevar['imworkingdir'] + 'badflxsc/')
                
                removedframes += badnewflxsc
            
                # Remove files that have bad newflxsc values from list of stack
                bad = set(badnewflxsc)
                newstack = [x for x in newstack if x not in bad]                       

            newtextslist = ' '.join(newstack)   
            
            stackcmd = pipevar['swarpcommand']
            
            # Keywords to carry through
            stackcmd += ' -COPY_KEYWORDS OBJECT,TARGNAME,FILTER,' +\
                        'INSTRUME,PIXSCALE,WAVELENG,DATE-OBS,AIRMASS,FLATFLD,FLATTYPE '
                             	
            # Create output variables that will be used by SWarp
            outfl = pipevar['imworkingdir'] + 'coadd' + targ + '_'+ re.sub(r'[^\w]', '', medtime)+'_'+ filter + '.fits'
            outwt = pipevar['imworkingdir'] + 'coadd' + targ + '_'+ re.sub(r'[^\w]', '', medtime)+'_'+ filter + '.weight.fits'
            
            if pipevar['verbose'] > 0:
                stackcmd += ' -VERBOSE_TYPE NORMAL '
            else:
                stackcmd = stackcmd + ' -VERBOSE_TYPE QUIET '
            
            # Coadd with flux scale
            stackcmd = stackcmd + ' -SUBTRACT_BACK N -WRITE_XML N -IMAGEOUT_NAME ' +\
                       outfl + ' -WEIGHTOUT_NAME ' + outwt +\
                       ' -FSCALE_KEYWORD NEWFLXSC ' + newtextslist
                       
            if pipevar['verbose'] > 0:
                print stackcmd
            
            os.system(stackcmd)
            head   = pf.getheader(outfl)
            pixscl = head['PIXSCALE']
            
            apd.findsexobj(outfl, 10.0, pipevar, pix=pixscl, aperture=20.0,
                           wtimage=outwt, quiet=quiet)
                           
            head   = pf.getheader(outfl)
            cpsfdi = 1.34 * float(head['SEEPIX'])
            
            # Run sextractor again on new coadd file
            apd.findsexobj(outfl, 3.0, pipevar, pix=pixscl, aperture=cpsfdi, 
                           wtimage=outwt, quiet=quiet)
            
            head = pf.getheader(outfl)
            
            coaddvars = np.loadtxt(outfl+'.stars', unpack=True)
            xim  = coaddvars[1,:]
            yim  = coaddvars[2,:]
            mag  = coaddvars[3,:]
            mage = coaddvars[4,:]
            flag = coaddvars[5,:]
            elon = coaddvars[8,:]
                
            # astropy does not like SWarp PV keywords or unicode, temporarily delete
            for key in head.keys():
                if any(x in key for x in ['PV1_', 'PV2_', 'COMMENT', 'HISTORY']):
                    del head[key]
                        
            w = wcs.WCS(head)
            wrd = w.all_pix2world(np.transpose([xim, yim]), 0)                
            imfile  = outfl + '.im'
            catfile = outfl + '.cat'      		

            # Save stars from image
            np.savetxt(imfile, np.transpose([wrd[:,0],wrd[:,1],mag]))

            # Filter name correction:
            if filter == 'Z' or filter == 'Y': filter = filter.lower()
            
            # Create catalog star file 
            # (python get_SEDs.py imfile filter catfile USNOB_THRESH alloptstars)
            sedcmd = 'python ' + pipevar['getsedcommand'] + ' ' + imfile + ' ' +\
                     filter + ' ' + catfile + " 15 True "+ qtcmd
            
            if pipevar['verbose'] > 0: print sedcmd
            os.system(sedcmd)
            
            # Read in catalog file
            cvars = np.loadtxt(catfile, unpack=True)
            
            refmag = cvars[catdict[filter],:]
            mode   = cvars[catdict['mode'],:]

            # Find catalog filter values and only cutoff values of actual detections
            goodind = (mode != -1) & (refmag < 90.0) & (flag < 8) & (elon <= 1.3)
            
            refmag = refmag[goodind]
            obsmag = mag[goodind]
            obserr = mage[goodind]
            obswts = np.zeros(len(obserr))
            obskpm = np.zeros(len(obsmag))

            # Store magnitudes and weights (with minimum magnitude error of 0.01)
            for i in np.arange(len(obsmag)):
                if obserr[i] < 0.1:
                    obskpm[i] = obsmag[i]
                    obswts[i] = 1.0/(max(obserr[i], 0.01)**2)
            
            czpts, cscats, crmss = apd.calc_zpt(np.array([refmag]), np.array([obskpm]), 
                                    np.array([obswts]), sigma=1.0,
                                    plotter=pipevar['imworkingdir']+'zpt_'+filter+'.ps')
            
            chead = pf.getheader(outfl)
            
            # Add zeropoint keywords to header
            chead['SPIX']     = (cpsfdi, 'Final aperture size')
            chead['ABSZPT']   = (czpts[0]+25.0, 'Absolute zeropoint from calc_zpt')
            chead['ABSZPTSC'] = (cscats[0], 'Robust scatter of absolute zeropoint')
            chead['ABSZPRMS'] = (crmss[0], 'RMS of absolute zeropoint')
            
            # Add summary of stack information to header
            chead['DATE1']     = (firsttime, 'First frame time')
            chead['DATEN']     = (lasttime, 'Last frame time')
            chead['DATE']     = (medtime, 'Median frame time')
            chead['NSTACK']   = nstack
            chead['AIRMASS']  = (medair, 'Median exposure airmass')
            chead['AIRMIN']   = (minair, 'Minimum exposure airmass')
            chead['AIRMAX']   = (maxair, 'Maximum exposure airmass')
            chead['EXPTIME']  = (medexp, 'Effective rescaled exposure time')
            chead['TOTALEXP'] = (totexp, 'Total summed integration time')
            chead['MAXEXP']   = (max(stackexps), 'Length of longest exposure')
            chead['MINEXP']   = (min(stackexps), 'Length of shortest exposure')
            
            for i, file in enumerate(newstack):
                chead['STACK'+str(i)] = file    
      		
            cdata = pf.getdata(outfl)
            pf.update(outfl, cdata, chead)

            if len(removedframes) > 0:
                print 'Removed frames with bad zeropoint fits: '
                print removedframes

    # If remove intermediate files keyword set, delete p(PREFIX)*.fits, fp(PREFIX)*.fits,
    # sky-*.fits, sfp(PREFIX)*.fits, zsfp(PREFIX)*.fits files
    if pipevar['rmifiles'] != 0:
        
        os.system('rm -f ' + pipevar['imworkingdir'] + 'p' + pipevar['prefix'] + '*.fits')
        os.system('rm -f ' + pipevar['imworkingdir'] + 'fp' + pipevar['prefix'] + '*.fits')
        os.system('rm -f ' + pipevar['imworkingdir'] + '*sky-*.fits')
        os.system('rm -f ' + pipevar['imworkingdir'] + 'sfp' + pipevar['prefix'] + '*.fits')
        os.system('rm -f ' + pipevar['imworkingdir'] + 'zsfp' + pipevar['prefix'] + '*.fits')  		                            
        os.system('rm -f ' + pipevar['imworkingdir'] + 'a*fp' + pipevar['prefix'] + '*.im')
        os.system('rm -f ' + pipevar['imworkingdir'] + 'a*fp' + pipevar['prefix'] + '*.stars')
        os.system('rm -f ' + pipevar['imworkingdir'] + 'a*fp' + pipevar['prefix'] + '*.cat')
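
The NEWFLXSC keyword written above holds the scale that brings each frame's zeropoint to the stack median; restated as a small helper:

def flux_scale(zpt, medzp):
    # Multiplicative scale SWarp applies via -FSCALE_KEYWORD NEWFLXSC;
    # identical to the 1.0 / 10.0**((zpt - medzp) / 2.5) used above
    return 10.0 ** (-(zpt - medzp) / 2.5)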
        
Example No. 10
def add_hbeta_sources_DEBUG_ADD_DISP(dir, model_fpath, sfrhist_fpath, sfrhist_fname, snapnum):
    """Reads Laura's velfile and otherfile and gets the Hbeta
    luminosity for the NLR particles. Then adds these particles as
    pure Hbeta sources to the specified sfrhist file."""

    print "Current/peak memory:", memory_usage()
    gc.get_referrers()

    velfile='LHb_data_vel.dat'
    otherfile='LHb_data_other.dat'

    vpath=dir+'/'+model_fpath+'/'+velfile
    opath=dir+'/'+model_fpath+'/'+otherfile
    
    print dir, model_fpath, velfile, otherfile
    print vpath
    print opath
    
    velf=read_hbeta_file(vpath, snapnum)
    otherf=read_hbeta_file(opath, snapnum)

    hbetalum=velf[:,-1]
    print "Total Hbeta luminosity: %g Lsun = %g W"%(sum(hbetalum),sum(hbetalum)*3.83e26)

    csnd=velf[:,-2]  # local sound speed around NL particle, in km/s

    sfrhistfile = dir+'/'+sfrhist_fpath+'/'+sfrhist_fname

    print "Current/peak memory:", memory_usage()

    print "Reading data for existing sources"
    sys.stdout.flush()
    sf=pyfits.open(sfrhistfile,memmap=True)

    print "Current/peak memory:", memory_usage()

    phdu=sf['PARTICLEDATA']

    print "getting npart..."
    sys.stdout.flush()

    nold=phdu.data.shape[0]
    nadd = velf.shape[0]

    print "getting particle data..."
    sys.stdout.flush()

    lam=sf['LAMBDA'].data.field('lambda')
    #pdb.set_trace()
    sed=phdu.data.field('L_lambda')
    pos=phdu.data.field('position')
    vel=phdu.data.field('velocity')
    age=phdu.data.field('age')


    print "getting BH info..."
    sys.stdout.flush()

    # figure out which BH is which. Laura's file contains the pos of
    # the particle in ref to the BH, so by subtracting those vectors
    # we get - delta pos of the black holes. We want the position in
    # relation to the black hole that's the first column in her files.
    # (Laura's note: last particles in unmodified fits file are BHs, and
    # have age=nan.)
    bhpart=where(age!=age)[0]

    if bhpart.shape[0]==1:
        # if we only have one then it's easy
        bhpos=pos[bhpart[0],:]
        # check that the other bh has junk
        assert otherf[0,3]==-1
        nbh = 1
    else:
        bhdeltapos=pos[bhpart[0],:]-pos[bhpart[1],:]
        lbhdeltapos=otherf[0,0:3]-otherf[0,3:6]
        #if these vectors are parallel we have the BHs switched
        nbh = 2
        if dot(bhdeltapos, lbhdeltapos)>0:
            bhpos=pos[bhpart[1],:]
        else:
            bhpos=pos[bhpart[0],:]
        

    print "Generating new PARTICLEDATA table"
    sys.stdout.flush()

    # create new table hdu. this also copies the data
    hdu = pyfits.new_table(phdu.columns, header=phdu.header, nrows=nold+nadd)

    print "ID: ",shape(hdu.data.field('ID'))
    print "position: ",shape(hdu.data.field('position'))
    print "mass: ",shape(hdu.data.field('mass'))

    print "nold-nbh=",nold-nbh
    print "nold+nadd-nbh=",nold+nadd-nbh
    
    # move BH data to the end of the new table, field by field
    for field in ('ID', 'position', 'velocity', 'L_lambda', 'L_bol',
                  'radius', 'mass', 'metallicity', 'formation_time',
                  'parent_ID', 'age', 'creation_mass'):
        hdu.data.field(field)[nold+nadd-nbh:] = hdu.data.field(field)[nold-nbh:nold]
 
    print "ID: ",shape(hdu.data.field('ID'))
    print "position: ",shape(hdu.data.field('position'))
    print "mass: ",shape(hdu.data.field('mass'))


    hbetased = array([make_hbeta_sed_DEBUG_add_dispersion(hbetalum[i],lam,csnd[i]) for i in range(nadd)])
    print "Integrated Hbeta L_lambda: %g W/m"%(sum(hbetased))

    print "hbetased:",hbetased.shape
    ised_hbeta = array([sum(hbetased[:,i]) for i in range(lam.size)])
    print "ised_hbeta:",ised_hbeta.shape
    
    # add new data before BH particles
    hdu.data.field('position')[nold-nbh:nold+nadd-nbh] = otherf[:,0:3]+bhpos
    hdu.data.field('velocity')[nold-nbh:nold+nadd-nbh] = velf[:,1:4]*1.0226903e-09
    #hdu.data.field('L_lambda')[nold-nbh:nold+nadd-nbh] = log10(hbetased+1e-30)
    #TEMPORARY, DEBUG ONLY:
    #hdu.data.field('L_lambda')[nold-nbh:nold+nadd-nbh] = log10(hbetased+1.0e+30)
    hdu.data.field('L_lambda')[nold-nbh:nold+nadd-nbh] = log10(hbetased+1.0e-5)
    #hdu.data.field('L_lambda')[nold-nbh:nold+nadd-nbh] = log10(hbetased+1.0e+10)
    #hdu.data.field('L_lambda')[nold-nbh:nold+nadd-nbh] = log10(hbetased+1.0e+20)
    hdu.data.field('L_bol')[nold-nbh:nold+nadd-nbh] = velf[:,-1]*3.83e26


    # calculate particle size from density and mass
    h=sf['gadget'].header['HubbleParam']
    mtomsun=sf['gadget'].header['UnitMass_in_g']*5.0273993e-34/h
    ltokpc=sf['gadget'].header['UnitLength_in_cm']*3.2407793e-22/h
    
    mass=otherf[:,8]*mtomsun
    density = otherf[:,6]*mtomsun/ltokpc**3

    ### try to get rid of some stuff we're done with:
    print "before del: Current/peak memory:", memory_usage()
    del velf
    del otherf    
    del hbetased
    print "after del: Current/peak memory:", memory_usage()

    hdu.data.field('radius')[nold-nbh:nold+nadd-nbh] = (3*mass/(4*pi*density))**(1./3)

    # fudge up new data

    #hdu.data.field('ID')[nold-nbh:nold+nadd-nbh] = 0 #0 only for NLs
    # new crazy scheme: assign fake, contiguous IDs to the NL particles.
    hdu.data.field('ID')[nold-nbh:nold+nadd-nbh] = hdu.data.field('ID')[nold-nbh-1]+range(1,nadd+1)     #0 only for NLs
    hdu.data.field('mass')[nold-nbh:nold+nadd-nbh] = 0     #0 only for NLs
    #hdu.data.field('mass')[nold-nbh:nold+nadd-nbh] = mass     #0 only for NLs
    hdu.data.field('metallicity')[nold-nbh:nold+nadd-nbh] = 0     #0 for BHs
    hdu.data.field('formation_time')[nold-nbh:nold+nadd-nbh] = 0     #0, not nan, for BHs
    hdu.data.field('parent_ID')[nold-nbh:nold+nadd-nbh] = 0     #0 for BHs
    #hdu.data.field('parent_ID')[nold-nbh:nold+nadd-nbh] = hdu.data.field('ID')[nold-nbh-1]+range(1,nadd+1)     #0 for BHs
    #hdu.data.field('age')[nold-nbh:nold+nadd-nbh] = nan     #nan for BHs
    hdu.data.field('age')[nold-nbh:nold+nadd-nbh] = 0     #nan for BHs
    hdu.data.field('creation_mass')[nold-nbh:nold+nadd-nbh] = 0.    #0 for BHs

    # update the integrated sed
    ihdu = sf['INTEGRATED_QUANTITIES']
    ised = ihdu.data.field('L_lambda')
    print "lg_ised",ised.shape,ised.dtype
    print "lg_ised[101]=",ised[101]
    print "ised_hbeta[101]=",ised_hbeta[101]    
    #ised_new = ised+ised_hbeta+1e-30
    # TEMPORARY; DEBUG ONLY:
    #ised_new = ised+ised_hbeta+1e+30
    ised_new = ised+ised_hbeta+1e-5
    #ised_new = ised+ised_hbeta+1e+10
    #ised_new = ised+ised_hbeta+1e+20
    print "lg_ised_new",ised_new.shape
    print "lg_ised_new[101]", ised_new[101]
    ihdu.data.field('L_lambda')[:] = ised_new

    #sf.close()

    print "Current/peak memory:", memory_usage()

    # add keyword with NLR particle info
    hdu.header.update('NNLR',nadd,'number of NLR particles added',after='LOGFLUX')
    hdu.header.update('NLRDIR',model_fpath,'NLR data subdirectory',after='LOGFLUX')

    print "Updating PARTICLEDATA hdu in file %s"%sfrhistfile
    sys.stdout.flush()

    print "Current/peak memory:", memory_usage()

    pyfits.update(sfrhistfile, hdu.data, hdu.header, 'PARTICLEDATA')

    del hdu.data
    
    print "Updating INTEGRATED_QUANTITIES hdu in file %s"%sfrhistfile
    sys.stdout.flush()

    print "Current/peak memory:", memory_usage()

    pyfits.update(sfrhistfile, ihdu.data, ihdu.header, 'INTEGRATED_QUANTITIES')

    del ihdu.data
    
    print "Current/peak memory:", memory_usage()

    sf.close()

    print "Current/peak memory:", memory_usage()

    gc.collect()
    gc.collect()
    
    print "Current/peak memory:", memory_usage()
    gc.get_referrers()

    return
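
make_hbeta_sed_DEBUG_add_dispersion is an external helper; judging by its arguments it spreads each particle's Hbeta luminosity over a line profile whose width follows the local sound speed. A rough sketch under that assumption (the line center, width, and normalisation are guesses, not the author's implementation):

import numpy as np

def make_hbeta_sed_sketch(lum_lsun, lam, csnd_kms):
    # Gaussian line at Hbeta (4861 Angstroms); assumes lam in metres
    lam0 = 4.861e-7
    sigma = lam0 * csnd_kms / 2.99792458e5
    profile = np.exp(-0.5 * ((lam - lam0) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))
    return lum_lsun * 3.83e26 * profile   # L_lambda in W/m, integrates to L in W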
Example No. 11
def add_hbeta_sources(velfile, otherfile, sfrhistfile, snapnum):
    """Reads Laura's velfile and otherfile and gets the Hbeta
    luminosity for the NLR particles. Then adds these particles as
    pure Hbeta sources to the specified sfrhist file."""
    
    velf=read_hbeta_file(velfile, snapnum)
    otherf=read_hbeta_file(otherfile, snapnum)

    hbetalum=velf[:,-1]
    print "Total Hbeta luminosity: %g Lsun = %g W"%(sum(hbetalum),sum(hbetalum)*3.83e26)

    print "Reading data for existing sources"
    sys.stdout.flush()
    sf=pyfits.open(sfrhistfile)

    phdu=sf['PARTICLEDATA']

    nold=phdu.data.shape[0]
    nadd = velf.shape[0]

    lam=sf['LAMBDA'].data.field('lambda')
    #pdb.set_trace()
    sed=phdu.data.field('L_lambda')
    pos=phdu.data.field('position')
    vel=phdu.data.field('velocity')
    age=phdu.data.field('age')

    # figure out which BH is which. Laura's file contains the pos of
    # the particle in ref to the BH, so by subtracting those vectors
    # we get - delta pos of the black holes. We want the position in
    # relation to the black hole that's the first column in her files.
    bhpart=where(age!=age)[0]

    if bhpart.shape[0]==1:
        # if we only have one then it's easy
        bhpos=pos[bhpart[0],:]
        # check that the other bh has junk
        assert otherf[0,3]==-1
    else:
        bhdeltapos=pos[bhpart[0],:]-pos[bhpart[1],:]
        lbhdeltapos=otherf[0,0:3]-otherf[0,3:6]
        #if these vectors are parallel we have the BHs switched
        if dot(bhdeltapos, lbhdeltapos)>0:
            bhpos=pos[bhpart[1],:]
        else:
            bhpos=pos[bhpart[0],:]

    print "Generating new PARTICLEDATA table"
    sys.stdout.flush()

    # create new table hdu. this also copies the data
    hdu = pyfits.new_table(phdu.columns, header=phdu.header, nrows=nold+nadd)

    hbetased = array([make_hbeta_sed(hbetalum[i],lam) for i in range(nadd)])
    print "Integrated Hbeta L_lambda: %g W/m"%(sum(hbetased))
                      
    # append new data
    hdu.data.field('position')[nold:] = otherf[:,0:3]+bhpos
    hdu.data.field('velocity')[nold:] = velf[:,1:4]*1.0226903e-09
    hdu.data.field('L_lambda')[nold:] = log10(hbetased+1e-300)
    hdu.data.field('L_bol')[nold:] = otherf[:,-1]*3.83e26

    # calculate particle size from density and mass
    h=sf['gadget'].header['HubbleParam']
    mtomsun=sf['gadget'].header['UnitMass_in_g']*5.0273993e-34/h
    ltokpc=sf['gadget'].header['UnitLength_in_cm']*3.2407793e-22/h
    
    mass=otherf[:,8]*mtomsun
    density = otherf[:,6]*mtomsun/ltokpc**3
    
    hdu.data.field('radius')[nold:] = (3*mass/(4*pi*density))**(1./3)

    # fudge up new data
    hdu.data.field('mass')[nold:] = 0
    hdu.data.field('metallicity')[nold:] = 0
    hdu.data.field('formation_time')[nold:] = nan
    hdu.data.field('parent_ID')[nold:] = 0
    hdu.data.field('age')[nold:] = nan
    hdu.data.field('creation_mass')[nold:] = 0.
    sf.close()

    print "Updating file %s"%sfrhistfile
    sys.stdout.flush()

    pyfits.update(sfrhistfile, hdu.data, hdu.header, 'PARTICLEDATA')
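
pyfits.new_table, used in the two examples above, is the legacy table constructor; under astropy.io.fits the equivalent is:

from astropy.io import fits

hdu = fits.BinTableHDU.from_columns(phdu.columns, header=phdu.header,
                                    nrows=nold + nadd)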
Example No. 12
def autopipestack(pipevar=inpipevar, customcat=None, customcatfilt=[]):
    """
    NAME:
        autopipestack
    PURPOSE:
        Does zeropoint correction on each individual frame using sextractor and
        get_SEDs. Creates a flux scale (newflxsc) for each frame from its offset
        from the median zeropoint, then uses that flux scale to stack the images
        with SWarp (frames with bad zeropoint fits or bad newflxsc values are
        moved to badzptfit/ and badflxsc/) and calculates the absolute zeropoint
        correction of the coadd. Saves the zeropoint plot as zpt_(FILTER).ps
    OPTIONAL KEYWORDS:
        pipevar       - input pipeline parameters (typically set in
                        ratautoproc.pro, but can be set to default)
        customcat     - custom star catalog (text file with one header row)
                        used for reference magnitudes instead of get_SEDs
        customcatfilt - filters covered by the custom catalog
    EXAMPLE:
        autopipestack(pipevar=inpipevar)
    DEPENDENCIES:
        SWarp, get_SEDs, calc_zpt, findsexobj (sextractor)
    """

    print 'STACK'

    # 'export' in a subshell does not persist; set the variable for this
    # process instead (fix for CDSCLIENT timeouts)
    os.environ['CDSCLIENT'] = 'http'

    qtcmd = 'True'
    quiet = 1
    if pipevar['verbose'] > 0:
        quiet = 0
        qtcmd = 'False'

    # If swarp configuration file ('default.swarp') does not exist, move swarp
    # output default configuration file
    if not os.path.isfile('default.swarp'):
        os.system(pipevar['swarpcommand'] + ' -d > default.swarp')

    # Find files that have had astrometry performed on them, stop program if don't exist
    files = glob.glob(pipevar['imworkingdir'] + 'a*sfp' + pipevar['prefix'] +
                      '*.fits')
    print pipevar['imworkingdir'] + 'a*sfp' + pipevar['prefix'] + '*.fits'
    print files
    if len(files) == 0:
        print 'Did not find any files! Check your data directory path!'
        return

    filetargs = []
    fileexpos = []
    filefilts = []
    fileairmv = []
    filesatvs = []
    filearms1 = []
    filearms2 = []
    filetime = []

    # Grab information in the headers of astrometry corrected file and save to array
    for i, file in enumerate(files):
        head = pf.getheader(file)
        obstime = Time(head['DATE-OBS'], format='isot', scale='utc')

        # Strip target name of whitespace
        filetargs += [re.sub(r'\s+', '', head['TARGNAME'])]
        fileexpos += [head['EXPTIME']]
        filefilts += [head['FILTER']]
        fileairmv += [head['AIRMASS']]
        filesatvs += [head['SATURATE']]
        filearms1 += [head['ASTRRMS1']]
        filearms2 += [head['ASTRRMS2']]
        filetime += [obstime.jd]

    files = np.array(files)
    filetargs = np.array(filetargs)
    fileexpos = np.array(fileexpos)
    filefilts = np.array(filefilts)
    filesatvs = np.array(filesatvs)
    fileairmv = np.array(fileairmv)
    filearms1 = np.array(filearms1)
    filearms2 = np.array(filearms2)
    filetime = np.array(filetime)
    targets = set(filetargs)

    # Dictionary of corresponding columns for catalog file
    catdict = {
        'u': 2,
        'g': 3,
        'r': 4,
        'i': 5,
        'z': 6,
        'y': 7,
        'B': 8,
        'V': 9,
        'R': 10,
        'I': 11,
        'J': 12,
        'H': 13,
        'K': 14,
        'ue': 15,
        'ge': 16,
        're': 17,
        'ie': 18,
        'ze': 19,
        'ye': 20,
        'Be': 21,
        'Ve': 22,
        'Re': 23,
        'Ie': 24,
        'Je': 25,
        'He': 26,
        'Ke': 27,
        'mode': 28
    }

    # Finds files with same target and the filters associated with this target
    for targ in targets:

        thistarget = np.where(filetargs == targ)
        if len(thistarget[0]) == 0: continue

        thistargetfilts = set(filefilts[thistarget])

        # Find files that have the same target and same filter and store information
        # on the exposure times and airmass. Only use good Scamp astrometric fit files
        for filter in thistargetfilts:
            stacki = (filetargs == targ) & (filefilts == filter) &\
                     (filearms1 < 2.0e-4) & (filearms1 > 5.0e-6) &\
                     (filearms2 < 2.0e-4) & (filearms2 > 5.0e-6)

            if sum(stacki) == 0: continue

            stacklist = files[stacki]
            stackexps = fileexpos[stacki]
            stackairm = fileairmv[stacki]
            stacktime = filetime[stacki]

            medexp = np.median(stackexps)
            medair = np.median(stackairm)
            minair = min(stackairm)
            maxair = max(stackairm)
            totexp = sum(stackexps)
            nstack = len(stacklist)
            firsttime = Time(stacktime[0], format='jd', scale='utc').isot
            lasttime = Time(stacktime[-1], format='jd', scale='utc').isot
            medtime = Time(np.median(stacktime), format='jd', scale='utc').isot

            textslist = ' '.join(stacklist)

            zpts = []

            # Find stars for each individual frame and try to find matches with coadded
            # frame with each frame optimized with PSF size
            for sfile in stacklist:
                head = pf.getheader(sfile)
                ipixscl = head['PIXSCALE']

                apd.findsexobj(sfile,
                               3.0,
                               pipevar,
                               pix=ipixscl,
                               aperture=20.0,
                               quiet=quiet)
                starfile = sfile + '.stars'

                svars = np.loadtxt(starfile, unpack=True)
                xim = svars[1, :]
                yim = svars[2, :]
                mag = svars[3, :]
                mage = svars[4, :]
                flag = svars[5, :]
                elon = svars[8, :]

                # astropy does not like SWarp PV keywords or unicode, temporarily delete
                for key in head.keys():
                    if any(x in key
                           for x in ['PV1_', 'PV2_', 'COMMENT', 'HISTORY']):
                        del head[key]

                w = wcs.WCS(head)
                wrd = w.all_pix2world(np.transpose([xim, yim]), 0)
                imfile = sfile + '.im'
                catfile = sfile + '.cat'

                # Save stars from image
                np.savetxt(imfile, np.transpose([wrd[:, 0], wrd[:, 1], mag]))

                # Filter name correction:
                if filter == 'Z' or filter == 'Y': filter = filter.lower()

                if 'SDSS' in filter:
                    filter = filter[-1].lower()

                nocustomcat = False
                # If custom catalog provided, match the same objects as the *.im file
                # and create refmag (reference magnitudes) that have the same matched
                # indices
                if customcat != None and filter in customcatfilt:

                    print 'USING CUSTOM CATALOG'
                    in_data = np.loadtxt(imfile)
                    input_coords = in_data[:, :2]
                    cat_data = np.loadtxt(customcat, skiprows=1)
                    cat_coords = cat_data[:, :2]

                    cat_matches, tmp = apd.identify_matches(
                        input_coords, cat_coords)

                    refmag = np.zeros(len(mag)) + 99
                    mode = np.zeros(len(mag)) + -1
                    for i, i_ind in enumerate(cat_matches):
                        if i_ind > 0:
                            #print input_coords[i], cat_coords[i_ind]
                            refmag[i] = cat_data[i_ind, catdict[filter]]
                            mode[i] = 4

                    # If no matching indices, run with the standard catalogs
                    if sum(refmag < 90.0) == 0: nocustomcat = True
                else:
                    nocustomcat = True

                # If custom catalog not provided, catalog doesn't include filter, or
                # no objects from catalog found in image then
                # use get_SEDs.py to make catalog using 2MASS + (SDSS or APASS or USNOB1)
                if nocustomcat:
                    # Create catalog star file
                    # (python get_SEDs.py imfile filter catfile USNOB_THRESH alloptstars)
                    sedcmd = 'python ' + pipevar['getsedcommand'] + ' ' + imfile + ' ' +\
                         filter + ' ' + catfile + " 15 True "+ qtcmd

                    if pipevar['verbose'] > 0: print sedcmd
                    os.system(sedcmd)

                    if not os.path.isfile(catfile):
                        zpts += [float('NaN')]
                        continue

                    # Read in catalog file
                    cvars = np.loadtxt(catfile, unpack=True)
                    refmag = cvars[catdict[filter], :]
                    mode = cvars[catdict['mode'], :]

                # Find catalog filter values and only cutoff values of actual detections
                goodind = (mode != -1) & (refmag < 90.0) & (flag <
                                                            8) & (elon <= 1.5)

                refmag = refmag[goodind]
                obsmag = mag[goodind]
                obserr = mage[goodind]
                obswts = np.zeros(len(obserr))
                obskpm = np.zeros(len(obsmag))

                # Store magnitudes and weights (with minimum magnitude error of 0.01)
                for i in np.arange(len(obsmag)):
                    if obserr[i] < 0.1:
                        obskpm[i] = obsmag[i]
                        obswts[i] = 1.0 / (max(obserr[i], 0.01)**2)
                zpt, scats, rmss = apd.calc_zpt(np.array([refmag]),
                                                np.array([obskpm]),
                                                np.array([obswts]),
                                                sigma=3.0)

                # Reload because we had to remove distortion parameters before
                head = pf.getheader(sfile)
                data = pf.getdata(sfile)
                head['ABSZPT'] = (zpt[0] + 25.0,
                                  'Relative zeropoint from calc_zpt')
                head['ABSZPTSC'] = (scats[0],
                                    'Robust scatter of relative zeropoint')
                head['ABSZPRMS'] = (rmss[0], 'RMS of relative zeropoint')

                pf.update(sfile, data, head)
                zpts += zpt

            # Move files with bad zeropoint calculations to folder 'badzptfit'
            # and do not use those frames
            zpts = np.array(zpts)
            goodframes = np.isfinite(zpts)
            badframes = ~np.isfinite(zpts)

            if len(zpts[badframes]) != 0:
                if not os.path.exists(pipevar['imworkingdir'] + '/badzptfit'):
                    os.makedirs(pipevar['imworkingdir'] + '/badzptfit')
                for file in stacklist[badframes]:
                    os.system('mv ' + file + ' ' + pipevar['imworkingdir'] +
                              '/badzptfit/')
                zpts = zpts[goodframes]
                newstack = stacklist[goodframes]
            else:
                newstack = stacklist

            badnewflxsc = []
            # Add relative zeropoint values to headers and calculate flux scale.
            # Remove unphysical fluxscale files
            medzp = np.median(zpts)
            for i, file in enumerate(newstack):
                head = pf.getheader(file)
                head['NEWFLXSC'] = (1.0 / (10.0**((zpts[i] - medzp) / 2.5)),
                                    'Flux scaling based on median zp')

                if 1.0 / (10.0**((zpts[i] - medzp) / 2.5)) < 0.1:
                    badnewflxsc += [file]

                data = pf.getdata(file)
                pf.update(file, data, head)

            removedframes = []
            # Removes files that have bad newflxsc values and removes from stack list
            if len(badnewflxsc) > 0:
                if not os.path.exists(pipevar['imworkingdir'] + '/badflxsc'):
                    os.makedirs(pipevar['imworkingdir'] + '/badflxsc')

                for ibad in badnewflxsc:
                    os.system('mv ' + ibad + ' ' + pipevar['imworkingdir'] +
                              'badflxsc/')

                removedframes += badnewflxsc

                # Remove files that have bad newflxsc values from list of stack
                bad = set(badnewflxsc)
                newstack = [x for x in newstack if x not in bad]

            newtextslist = ' '.join(newstack)

            stackcmd = pipevar['swarpcommand']

            # Keywords to carry through
            stackcmd += ' -COPY_KEYWORDS OBJECT,TARGNAME,FILTER,' +\
                        'INSTRUME,PIXSCALE,WAVELENG,DATE-OBS,AIRMASS,FLATFLD,FLATTYPE '

            # Create output variables that will be used by SWarp
            outfl = pipevar['imworkingdir'] + 'coadd' + targ + '_' + re.sub(
                r'[^\w]', '', medtime) + '_' + filter + '.fits'
            outwt = pipevar['imworkingdir'] + 'coadd' + targ + '_' + re.sub(
                r'[^\w]', '', medtime) + '_' + filter + '.weight.fits'

            if pipevar['verbose'] > 0:
                stackcmd += ' -VERBOSE_TYPE NORMAL '
            else:
                stackcmd = stackcmd + ' -VERBOSE_TYPE QUIET '

            # Coadd with flux scale
            stackcmd = stackcmd + ' -SUBTRACT_BACK N -WRITE_XML N -IMAGEOUT_NAME ' +\
                       outfl + ' -WEIGHTOUT_NAME ' + outwt +\
                       ' -FSCALE_KEYWORD NEWFLXSC ' + newtextslist

            if pipevar['verbose'] > 0:
                print stackcmd

            os.system(stackcmd)
            head = pf.getheader(outfl)
            pixscl = head['PIXSCALE']

            try:
                apd.findsexobj(outfl,
                               10.0,
                               pipevar,
                               pix=pixscl,
                               aperture=20.0,
                               wtimage=outwt,
                               quiet=quiet)
            except:
                sys.exit(
                    'Problem opening coadd fits file, may need to coadd in smaller bin size'
                )

            head = pf.getheader(outfl)
            cpsfdi = 1.34 * float(head['SEEPIX'])

            # Run sextractor again on new coadd file
            apd.findsexobj(outfl,
                           3.0,
                           pipevar,
                           pix=pixscl,
                           aperture=cpsfdi,
                           wtimage=outwt,
                           quiet=quiet)

            head = pf.getheader(outfl)

            coaddvars = np.loadtxt(outfl + '.stars', unpack=True)
            xim = coaddvars[1, :]
            yim = coaddvars[2, :]
            mag = coaddvars[3, :]
            mage = coaddvars[4, :]
            flag = coaddvars[5, :]
            elon = coaddvars[8, :]

            # astropy does not like SWarp PV keywords or unicode, temporarily delete
            for key in head.keys():
                if any(x in key
                       for x in ['PV1_', 'PV2_', 'COMMENT', 'HISTORY']):
                    del head[key]

            w = wcs.WCS(head)
            wrd = w.all_pix2world(np.transpose([xim, yim]), 0)
            imfile = outfl + '.im'
            catfile = outfl + '.cat'

            # Save stars from image
            np.savetxt(imfile, np.transpose([wrd[:, 0], wrd[:, 1], mag]))

            # Filter name correction:
            if filter == 'Z' or filter == 'Y': filter = filter.lower()

            nocustomcat = False
            # If custom catalog provided, match the same objects as the *.im file
            # and create refmag (reference magnitudes) that have the same matched
            # indices
            if customcat != None and filter in customcatfilt:

                print 'USING CUSTOM CATALOG'
                in_data = np.loadtxt(imfile)
                input_coords = in_data[:, :2]
                cat_data = np.loadtxt(customcat, skiprows=1)
                cat_coords = cat_data[:, :2]

                cat_matches, tmp = apd.identify_matches(
                    input_coords, cat_coords)

                refmag = np.zeros(len(mag)) + 99
                mode = np.zeros(len(mag)) + -1
                for i, i_ind in enumerate(cat_matches):
                    if i_ind > 0:
                        #print input_coords[i], cat_coords[i_ind]
                        refmag[i] = cat_data[i_ind, catdict[filter]]
                        mode[i] = 4

                # If no matching indices, run with the standard catalogs
                if sum(refmag < 90.0) == 0: nocustomcat = True
            else:
                nocustomcat = True

            # If custom catalog not provided, catalog doesn't include filter, or
            # no objects from catalog found in image then
            # use get_SEDs.py to make catalog using 2MASS + (SDSS or APASS or USNOB1)
            if nocustomcat:
                # Create catalog star file
                # (python get_SEDs.py imfile filter catfile USNOB_THRESH alloptstars)
                sedcmd = 'python ' + pipevar['getsedcommand'] + ' ' + imfile + ' ' +\
                    filter + ' ' + catfile + " 15 True "+ qtcmd

                if pipevar['verbose'] > 0: print sedcmd
                os.system(sedcmd)

                if not os.path.isfile(catfile):
                    zpts += [float('NaN')]
                    continue

                # Read in catalog file
                cvars = np.loadtxt(catfile, unpack=True)
                refmag = cvars[catdict[filter], :]
                mode = cvars[catdict['mode'], :]

            # Find catalog filter values and only cutoff values of actual detections
            goodind = (mode != -1) & (refmag < 90.0) & (flag < 8) & (elon <=
                                                                     1.3)

            refmag = refmag[goodind]
            obsmag = mag[goodind]
            obserr = mage[goodind]
            obswts = np.zeros(len(obserr))
            obskpm = np.zeros(len(obsmag))

            # Store magnitudes and weights (with minimum magnitude error of 0.01)
            for i in np.arange(len(obsmag)):
                if obserr[i] < 0.1:
                    obskpm[i] = obsmag[i]
                    obswts[i] = 1.0 / (max(obserr[i], 0.01)**2)

            czpts, cscats, crmss = apd.calc_zpt(
                np.array([refmag]),
                np.array([obskpm]),
                np.array([obswts]),
                sigma=1.0,
                plotter=pipevar['imworkingdir'] + 'zpt_' + filter + '.ps')

            chead = pf.getheader(outfl)

            # Add zeropoint keywords to header
            chead['SPIX'] = (cpsfdi, 'Final aperture size')
            chead['ABSZPT'] = (czpts[0] + 25.0,
                               'Absolute zeropoint from calc_zpt')
            chead['ABSZPTSC'] = (cscats[0],
                                 'Robust scatter of absolute zeropoint')
            chead['ABSZPRMS'] = (crmss[0], 'RMS of absolute zeropoint')

            # Add summary of stack information to header
            chead['DATE1'] = (firsttime, 'First frame time')
            chead['DATEN'] = (lasttime, 'Last frame time')
            chead['DATE'] = (medtime, 'Median frame time')
            chead['NSTACK'] = nstack
            chead['AIRMASS'] = (medair, 'Median exposure airmass')
            chead['AIRMIN'] = (minair, 'Minimum exposure airmass')
            chead['AIRMAX'] = (maxair, 'Maximum exposure airmass')
            chead['EXPTIME'] = (medexp, 'Effective rescaled exposure time')
            chead['TOTALEXP'] = (totexp, 'Total summed integration time')
            chead['MAXEXP'] = (max(stackexps), 'Length of longest exposure')
            chead['MINEXP'] = (min(stackexps), 'Length of shortest exposure')

            for i, file in enumerate(newstack):
                chead['STACK' + str(i)] = file

            cdata = pf.getdata(outfl)
            pf.update(outfl, cdata, chead)

            if len(removedframes) > 0:
                print 'Removed frames with bad zeropoint fits: '
                print removedframes

    # If remove intermediate files keyword set, delete p(PREFIX)*.fits, fp(PREFIX)*.fits,
    # sky-*.fits, sfp(PREFIX)*.fits, zsfp(PREFIX)*.fits files
    if pipevar['rmifiles'] != 0:

        os.system('rm -f ' + pipevar['imworkingdir'] + 'p' +
                  pipevar['prefix'] + '*.fits')
        os.system('rm -f ' + pipevar['imworkingdir'] + 'fp' +
                  pipevar['prefix'] + '*.fits')
        os.system('rm -f ' + pipevar['imworkingdir'] + '*sky-*.fits')
        os.system('rm -f ' + pipevar['imworkingdir'] + 'sfp' +
                  pipevar['prefix'] + '*.fits')
        os.system('rm -f ' + pipevar['imworkingdir'] + 'zsfp' +
                  pipevar['prefix'] + '*.fits')
        os.system('rm -f ' + pipevar['imworkingdir'] + 'a*fp' +
                  pipevar['prefix'] + '*.im')
        os.system('rm -f ' + pipevar['imworkingdir'] + 'a*fp' +
                  pipevar['prefix'] + '*.stars')
        os.system('rm -f ' + pipevar['imworkingdir'] + 'a*fp' +
                  pipevar['prefix'] + '*.cat')
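
# A minimal sketch of the weighted zero-point solve that apd.calc_zpt
# presumably performs per image (the real routine also returns robust scatter
# and RMS estimates; calc_zpt_sketch is an illustrative name, not pipeline code):
def calc_zpt_sketch(refmag, obsmag, obswts):
    diff = np.asarray(refmag) - np.asarray(obsmag)
    wts = np.asarray(obswts)
    zpt = np.sum(wts * diff) / np.sum(wts)  # inverse-variance weighted mean
    rms = np.sqrt(np.sum(wts * (diff - zpt) ** 2) / np.sum(wts))
    return zpt, rms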
Exemplo n.º 17
0
        tmp_hdr.update('OBS-MID',
                       str(midtime_UT).replace(" ", "T"),
                       comment='Time for midtime of exposure in UT')
        tmp_hdr.update('JD-MID',
                       float("%.7f" % float(midtime_jd)),
                       comment='Time for midtime of exposure in Julian date')
        tmp_hdr.update('MJD-MID',
                       float("%.7f" % float(midtime_mjd)),
                       comment='Time for midtime of exposure in Modified JD')
        tmp_hdr.update('ST-MID',
                       str(sidereal_time),
                       comment='Midtime of exposure in local sidereal time')
        #      tmp_hdr.update('BJD-MID', float("%.7f" % float(BJD)), comment='UT Barycentric midtime of exp. BJD-2400000')
        #      tmp_hdr.update('BVC', float("%.7f" % float(BVC)), comment='Barycentric velocity correction in Km/s')
        # ----------------------------------------------------------------------------------------------------------
        tmp_hdr.update('---DB---',
                       '------DEBUG-----',
                       comment='-------------------------------------')
        tmp_hdr.update('UP-TIME',
                       c_time,
                       comment='Used seconds to update the header')
        tmp_hdr.update(
            'EXP-READ',
            used_exp_time,
            comment='Exposure + readout time in seconds from imclass')

        ########################## Here the modified FITS header is updated and saved ######################################
        pyfits.update(fits_file, tmp_im, tmp_hdr)

        return 1
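
# The tmp_hdr.update(key, value, comment=...) calls above use the legacy
# pyfits header API; in current astropy.io.fits the same assignment is a
# plain item set (a hedged equivalent, not part of the original script):
#   tmp_hdr['OBS-MID'] = (str(midtime_UT).replace(" ", "T"),
#                         'Time for midtime of exposure in UT')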
Exemplo n.º 18
0
def astrometry(atfimages, scamprun=1, pipevar=None):

    """
    NAME:
        astrometry
    PURPOSE:
        Run sextractor and scamp to refine astrometric solution
    INPUTS:
        atfimages - list of files to run through scamp
        scamprun  - the first run does a LOOSE run with distortion degree 1, any
                    other run will look for high distortion parameters, if it
                    finds it will use distortion degree 7, otherwise 3 (will also cut out
                    FLXSCALE on runs after 1)
    EXAMPLE:
        astrometry(atfimages, scamprun=2, pipevar=pipevar)
    FUTURE IMPROVEMENTS:
        Better difference between scamp runs.
    """
        
    acatlist = ''
    scat = {'sdss': 'SDSS-R7', 'tmpsc': '2MASS', 'tmc': '2MASS', 'ub2': 'USNO-B1'} 

    for cfile in atfimages:
        head = pf.getheader(cfile)
        pixscale  = head['PIXSCALE']
        sourcecat = head['ASTR_CAT']
                
        trunfile = os.path.splitext(cfile)[0]
                
        if pipevar['verbose'] > 0:
            sexcom = pipevar['sexcommand'] + ' -CATALOG_NAME ' + trunfile + \
                    '.cat -CATALOG_TYPE FITS_LDAC -FILTER_NAME astrom.conv ' + \
                    '-PARAMETERS_NAME astrom.param -DETECT_THRESH 2.0 ' + \
                    '-ANALYSIS_THRESH 2.0 -PIXEL_SCALE ' + str(pixscale) + \
                    ' ' + cfile
            print sexcom
        else:
            sexcom = pipevar['sexcommand'] + ' -CATALOG_NAME ' + trunfile + \
                    '.cat -CATALOG_TYPE FITS_LDAC -FILTER_NAME astrom.conv ' + \
                    '-PARAMETERS_NAME astrom.param -DETECT_THRESH 2.0 ' + \
                    '-ANALYSIS_THRESH 2.0 -VERBOSE_TYPE QUIET -PIXEL_SCALE ' + \
                    str(pixscale) + ' ' + cfile
                
        os.system(sexcom)
                
        if head['ASTR_NUM'] > 0: acatlist += ' ' + trunfile + '.cat'
                
    if sourcecat in scat:
        cat_u = scat[sourcecat]
    else:
        cat_u = 'NONE'
        print 'No valid catalogs available for SCAMP, check that ' +\
                'vlt_autoastrometry.py ran correctly'
        return
    
    if scamprun == 1:
        loose = ' -MOSAIC_TYPE LOOSE'
        distdeg = 1
    else:
        loose = ' '
        try:
            distort = head['PV1_37']
            distdeg = 7
        except KeyError:
            distdeg = 3
                
    if pipevar['verbose'] > 0:
        scampcmd = "scamp -POSITION_MAXERR 0.2 -DISTORT_DEGREES " + str(distdeg)+\
                    loose + " -ASTREF_CATALOG " + cat_u + \
                    " -SOLVE_PHOTOM N -SN_THRESHOLDS 3.0,10.0 " + \
                    "-CHECKPLOT_DEV NULL -WRITE_XML N -VERBOSE_TYPE FULL " +\
                    acatlist
        print scampcmd
    else:
        scampcmd = "scamp -POSITION_MAXERR 0.2 -DISTORT_DEGREES " + str(distdeg)+\
                    loose + " -ASTREF_CATALOG " + cat_u + \
                    " -SOLVE_PHOTOM N -SN_THRESHOLDS 3.0,10.0 " + \
                    "-CHECKPLOT_DEV NULL -WRITE_XML N -VERBOSE_TYPE QUIET " +\
                    acatlist                                    
                
    os.system(scampcmd)
    os.system('rm ' + acatlist)
            
    # Adds header information to file and deletes extra files
    for cfile in atfimages:
        trunfile = os.path.splitext(cfile)[0]
                
        if pipevar['verbose'] > 0:
            os.system('missfits -WRITE_XML N ' + cfile)
        else:
            os.system('missfits -WRITE_XML N -VERBOSE_TYPE QUIET ' + cfile)
                    
        os.system('rm ' + trunfile + '.head ' + cfile + '.back')

        if scamprun != 1:
            him  = pf.getheader(cfile)
            data = pf.getdata(cfile)
            del him['FLXSCALE']
            pf.update(cfile, data, him)
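
# Typical usage (per the docstring; pipevar is set up elsewhere in the pipeline):
#   astrometry(atfimages, scamprun=1, pipevar=pipevar)  # loose first pass
#   astrometry(atfimages, scamprun=2, pipevar=pipevar)  # refined distortion fit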
Exemplo n.º 19
0
import shutil
shutil.copyfile(imagen_header_original, FILE_TEMP)

pyfits.info(imagen_header_original)
pyfits.info(FILE_TEMP)
pyfits.info(imagen_in)

imagen_header2 = FILE_TEMP  # must be the same as imagen_header

header = pyfits.getheader(FILE_TEMP)
header2 = pyfits.getheader(FILE_TEMP, 2)

imagen_no_header = pyfits.getdata(imagen_Corre, header=False)

# update extension 2 with the new data and its matching header
pyfits.update(imagen_header2, imagen_no_header, header2, ext=2)

### copying the final result

FILE_FIN = imagen_end + '_Amp5.fits'
# copying the file
import shutil
shutil.copyfile(FILE_TEMP, FILE_FIN)

#Final Sanity check

pyfits.info(FILE_FIN)

print "Gracias por usar M.Mora scripts!!"
print "mmora at astro.puc.cl"
Exemplo n.º 20
0
def findsexobj(file, sigma, pipevar, masksfx=None, zeropt=25.0, maptype='MAP_WEIGHT',
               wtimage=None, fwhm=1.5, pix=0.3787, aperture=5.0, elong_cut=1.5, 
               quiet=0):
    """
    NAME:
        findsexobj
    PURPOSE:
        Finds sextractor objects with optional inputs. Estimates seeing from stars found. 
    INPUTS:
    	file    - fits file to run sextractor on
    	sigma   - detection threshold and analysis threshold for sextractor
    	pipevar - pipeline parameters (typically set in autopipedefaults or autoproc)
    	
    OPTIONAL KEYWORDS:
    	masksfx   - text string identifier for sextractor CHECKIMAGE_NAME
    	zeropt    - input value for sextractor MAG_ZEROPOINT
    	wtimage   - file for input for sextractor WEIGHT_IMAGE
    	fwhm      - input value for sextractor SEEING_FWHM
    	pix       - input value for sextractor PIXEL_SCALE
    	aperture  - input value for sextractor PHOT_APERTURES
    	elong_cut - cutoff limit for FWHM calculation of elongation to eliminate non-stars
    	quiet     - no output from sextractor if set
    EXAMPLE:
        findsexobj(file, 3.0, pipevar, aperture=20.0)
    DEPENDENCIES:
        sextractor
    FUTURE IMPROVEMENTS:
        More keywords to sextractor?
    """
    
    # Move necessary sextractor configuration files if they are not in current directory
    if not os.path.isfile('coadd.param'): 
        os.system('cp ' + pipevar['defaultspath'] + '/coadd.param .')
    if not os.path.isfile('coadd.conv'): 
        os.system('cp ' + pipevar['defaultspath'] + '/coadd.conv .') 
    if not os.path.isfile('coadd.config'): 
        os.system('cp ' + pipevar['defaultspath'] + '/coadd.config .') 
    if not os.path.isfile('default.nnw'): 
        os.system('cp ' + pipevar['defaultspath'] + '/default.nnw .')  

    if quiet > 0: 
        verbosetype = 'QUIET'
    else:
        verbosetype = 'NORMAL'
        
    # Run sextractor with given input parameters. Saves temp.cat as 
    # starfile, saves starmask, and calculates seeing from starlike objects. Saves 
    # necessary parameters to header
    if file == '': return
    
    if not os.path.isfile(file): return
    starfile = file + '.stars'
        
    trunfile = os.path.splitext(file)[0]
        
    sexcmd = pipevar['sexcommand'] + ' -c coadd.config -DETECT_THRESH ' +\
             str(sigma) + ' -ANALYSIS_THRESH ' + str(sigma) + ' -PHOT_APERTURES ' +\
             str(aperture) + ' -MAG_ZEROPOINT ' + str(zeropt) + ' -PIXEL_SCALE ' +\
             str(pix) + ' -SEEING_FWHM ' + str(fwhm) + ' -VERBOSE_TYPE ' +verbosetype
    
    if masksfx != None:
        mskimg = trunfile + '_' + masksfx + '.fits'
        sexcmd += ' -CHECKIMAGE_TYPE OBJECTS' + ' -CHECKIMAGE_NAME ' + mskimg
        
    if wtimage != None:
        sexcmd += ' -WEIGHT_TYPE '+maptype+' -WEIGHT_IMAGE ' + wtimage + ' '
        
    sexcmd += ' ' + file
    if quiet == 0: print sexcmd
    os.system(sexcmd)
        
    if quiet == 0: print 'mv -f test.cat ' + starfile
    os.system('mv -f test.cat ' + starfile)
    
    num = 0    
    # Calculates seeing with starlike objects
    if os.path.isfile(starfile):
        vars   = np.loadtxt(starfile, unpack=True)
        num    = vars[0,:]
        flag   = vars[5,:]
        elon   = vars[8,:]
        fwhmim = vars[9,:]
        keep = (flag == 0) & (elon < elong_cut) & (fwhmim > fwhm) & (fwhmim < 20.0)

        if sum(keep) <= 1: 
            seepix = float('NAN')
        else:
            seepix = np.median(fwhmim[keep])        
    else:
        print 'Failed to find Sextractor output file!'
        seepix = float('NaN')
	 
    head = pf.getheader(file)
    
    if masksfx != None:
        head['MASKNAME'] = (mskimg, "Object mask image from Sextractor")
    
    head['STARFILE'] = (starfile, "Objects file from Sextractor" )
    head['ZEROPT']   = (zeropt, "Photometric zero-point used for Sextractor")
    head['SEEPIX']   = (seepix, "Estimated seeing from Sextractor objects (pix)")
    head['NSTARS']   = (len(num), "Estimated number of objects from Sextractor")
    
    data = pf.getdata(file)
    pf.update(file, data, head)
    
    # Removes config files after done
    os.system('rm -f coadd.param')
    os.system('rm -f coadd.conv')
    os.system('rm -f coadd.config')
    os.system('rm -f default.nnw')
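
# Typical call (from the docstring), followed by reading back the seeing
# estimate that findsexobj stores in the header (a sketch):
#   findsexobj(file, 3.0, pipevar, aperture=20.0)
#   seepix = pf.getheader(file)['SEEPIX']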
Exemplo n.º 21
0
                ### Strange that I have to divide by the flat field.... maybe the way I made it is wrong.
                new_im = (new_im / flat).astype(numpy.int16)
            except Exception, e:
                print e


#################################################################################################

#	if test_value == 1:
#	   try:
#	      pyfits.update("/home/madsfa/SkyCam/images/astrometry/test_smooth.fits",new_im,hdr)
#          except Exception, e:
#	      pyfits.writeto("/home/madsfa/SkyCam/images/astrometry/test_smooth.fits",new_im,hdr)
#	else:
        try:
            pyfits.update(filename, new_im, hdr)
        except Exception, e:
            pyfits.writeto(filename, new_im, hdr, uint=True)

    def clean_im_array(self, im_arr, test_value):
        """
           This function does the same as the one above but instead of making a new file it overwrites the original one. 
        """

        dimx, dimy = im_arr.shape  # The dimensions of the image.
        mean_value = numpy.mean(im_arr)
        std_value = numpy.std(im_arr)

        ii = numpy.where(
            im_arr >= mean_value + (std_value)
        )  # this finds all pixels with value greater than one standard deviation above the mean.
Exemplo n.º 22
0
def astrometry(atfimages, scamprun=1, pipevar=None):
    """
    NAME:
        astrometry
    PURPOSE:
        Run sextractor and scamp to refine astrometric solution
    INPUTS:
        atfimages - list of files to run through scamp
        scamprun  - the first run does a LOOSE run with distortion degree 1, any
                    other run will look for high distortion parameters, if it
                    finds it will use distortion degree 7, otherwise 3 (will also cut out
                    FLXSCALE on runs after 1)
    EXAMPLE:
        astrometry(atfimages, scamprun=2, pipevar=pipevar)
    FUTURE IMPROVEMENTS:
        Better difference between scamp runs.
    """

    acatlist = ''
    scat = {
        'sdss': 'SDSS-R7',
        'tmpsc': '2MASS',
        'tmc': '2MASS',
        'ub2': 'USNO-B1'
    }

    for cfile in atfimages:
        head = pf.getheader(cfile)
        pixscale = head['PIXSCALE']
        sourcecat = head['ASTR_CAT']

        trunfile = os.path.splitext(cfile)[0]

        if pipevar['verbose'] > 0:
            sexcom = pipevar['sexcommand'] + ' -CATALOG_NAME ' + trunfile + \
                    '.cat -CATALOG_TYPE FITS_LDAC -FILTER_NAME astrom.conv ' + \
                    '-PARAMETERS_NAME astrom.param -DETECT_THRESH 2.0 ' + \
                    '-ANALYSIS_THRESH 2.0 -PIXEL_SCALE ' + str(pixscale) + \
                    ' ' + cfile
            print sexcom
        else:
            sexcom = pipevar['sexcommand'] + ' -CATALOG_NAME ' + trunfile + \
                    '.cat -CATALOG_TYPE FITS_LDAC -FILTER_NAME astrom.conv ' + \
                    '-PARAMETERS_NAME astrom.param -DETECT_THRESH 2.0 ' + \
                    '-ANALYSIS_THRESH 2.0 -VERBOSE_TYPE QUIET -PIXEL_SCALE ' + \
                    str(pixscale) + ' ' + cfile

        os.system(sexcom)

        if head['ASTR_NUM'] > 0: acatlist += ' ' + trunfile + '.cat'

    if sourcecat in scat:
        cat_u = scat[sourcecat]
    else:
        cat_u = 'NONE'
        print 'No valid catalogs available for SCAMP, check that ' +\
                'vlt_autoastrometry.py ran correctly'
        return

    if scamprun == 1:
        loose = ' -MOSAIC_TYPE LOOSE'
        distdeg = 1
    else:
        loose = ' '
        try:
            distort = head['PV1_37']
            distdeg = 7
        except KeyError:
            distdeg = 3

    if pipevar['verbose'] > 0:
        scampcmd = "scamp -POSITION_MAXERR 0.2 -DISTORT_DEGREES " + str(distdeg)+\
                    loose + " -ASTREF_CATALOG " + cat_u + \
                    " -SOLVE_PHOTOM N -SN_THRESHOLDS 3.0,10.0 " + \
                    "-CHECKPLOT_DEV NULL -WRITE_XML N -VERBOSE_TYPE FULL " +\
                    acatlist
        print scampcmd
    else:
        scampcmd = "scamp -POSITION_MAXERR 0.2 -DISTORT_DEGREES " + str(distdeg)+\
                    loose + " -ASTREF_CATALOG " + cat_u + \
                    " -SOLVE_PHOTOM N -SN_THRESHOLDS 3.0,10.0 " + \
                    "-CHECKPLOT_DEV NULL -WRITE_XML N -VERBOSE_TYPE QUIET " +\
                    acatlist

    os.system(scampcmd)
    os.system('rm ' + acatlist)

    # Adds header information to file and deletes extra files
    for cfile in atfimages:
        trunfile = os.path.splitext(cfile)[0]

        if pipevar['verbose'] > 0:
            os.system('missfits -WRITE_XML N ' + cfile)
        else:
            os.system('missfits -WRITE_XML N -VERBOSE_TYPE QUIET ' + cfile)

        os.system('rm ' + trunfile + '.head ' + cfile + '.back')

        if scamprun != 1:
            him = pf.getheader(cfile)
            data = pf.getdata(cfile)
            del him['FLXSCALE']
            pf.update(cfile, data, him)
Exemplo n.º 23
0
class SMOOTH(object):
    """
      @brief: This class cleans an image from hot pixels.
   """
    def clean(self, path, filename):
        """
           This function finds all pixels where the value is greater than 2 times (adjustable) the standard deviation from the mean value.
           For these pixels it calculates the mean value of the 8 pixels around the one with the high value and sets the middle one to this mean value.
           This will result in the removal of all hot pixels with a value higher than 2 times the std. It will also affect the real stars, but since these
           cover more than one pixel, the effect is small enough to keep stars visible.
        """

        # image is the path to the image file.
        im, hdr = pyfits.getdata(path + filename, header=True)
        #try:
        #	tmp_fits = pyfits.open(filename, int16=True)#, do_not_scale_image_data = True)
        #	im = tmp_fits[0].data
        #	hdr = tmp_fits[0].header
        #except Exception, e:
        #	print e
        #	print "Error: ", e

        dimx, dimy = im.shape  # The dimensions of the image.
        mean_value = numpy.mean(im)
        std_value = numpy.std(im)

        #new_im = im

        ii = numpy.where(
            im >= mean_value + (2 * std_value)
        )  # this finds all pixels with value greater than two times the standard deviation.

        ix, iy = ii

        px_1 = ix + numpy.ones(len(ix), dtype=numpy.int)
        py_1 = iy + numpy.ones(len(ix), dtype=numpy.int)
        bad_x = numpy.where(
            px_1 >= (dimx - 1)
        )  # These two lines check that the index remains inside image dimensions.
        bad_y = numpy.where(py_1 >= (dimy - 1))
        px_1[bad_x] = 0  # Sets index value to zero if outside image dimensions.
        py_1[bad_y] = 0

        px_2 = ix + numpy.ones(len(ix), dtype=numpy.int)
        py_2 = iy
        bad_x = numpy.where(px_2 >= (dimx - 1))
        bad_y = numpy.where(py_2 >= (dimy - 1))
        px_2[bad_x] = 0
        py_2[bad_y] = 0

        px_3 = ix + numpy.ones(len(ix), dtype=numpy.int)
        py_3 = iy - numpy.ones(len(ix), dtype=numpy.int)
        bad_x = numpy.where(px_3 >= (dimx - 1))
        bad_y = numpy.where(py_3 >= (dimy - 1))
        px_3[bad_x] = 0
        py_3[bad_y] = 0

        px_4 = ix - numpy.ones(len(ix), dtype=numpy.int)
        py_4 = iy - numpy.ones(len(ix), dtype=numpy.int)
        bad_x = numpy.where(px_4 >= (dimx - 1))
        bad_y = numpy.where(py_4 >= (dimy - 1))
        px_4[bad_x] = 0
        py_4[bad_y] = 0

        px_5 = ix - numpy.ones(len(ix), dtype=numpy.int)
        py_5 = iy + numpy.ones(len(ix), dtype=numpy.int)
        bad_x = numpy.where(px_5 >= (dimx - 1))
        bad_y = numpy.where(py_5 >= (dimy - 1))
        px_5[bad_x] = 0
        py_5[bad_y] = 0

        px_6 = ix
        py_6 = iy + numpy.ones(len(ix), dtype=numpy.int)
        bad_x = numpy.where(px_6 >= (dimx - 1))
        bad_y = numpy.where(py_6 >= (dimy - 1))
        px_6[bad_x] = 0
        py_6[bad_y] = 0

        px_7 = ix
        py_7 = iy - numpy.ones(len(ix), dtype=numpy.int)
        bad_x = numpy.where(px_7 >= (dimx - 1))
        bad_y = numpy.where(py_7 >= (dimy - 1))
        px_7[bad_x] = 0
        py_7[bad_y] = 0

        px_8 = ix - numpy.ones(len(ix), dtype=numpy.int)
        py_8 = iy
        bad_x = numpy.where(px_8 >= (dimx - 1))
        bad_y = numpy.where(py_8 >= (dimy - 1))
        px_8[bad_x] = 0
        py_8[bad_y] = 0

        # Here the mean value of the 8 evaluated pixels is calculated. In the first and last rows and columns the values are not correct, but this does not matter.
        cal_around = numpy.zeros((dimx, dimy))
        cal_around[ii] = (im[px_1, py_1] + im[px_2, py_2] + im[px_3, py_3] +
                          im[px_4, py_4] + im[px_5, py_5] + im[px_6, py_6] +
                          im[px_7, py_7] + im[px_8, py_8]) / 8.0
        jj = numpy.where(cal_around <= (mean_value + 2 * std_value))
        new_cal_array = numpy.zeros((dimx, dimy))
        new_cal_array[jj] = cal_around[jj]
        tt = numpy.where(new_cal_array > 0.0)
        im[tt] = cal_around[tt]
        new_im = im

        ################################### This divides with the flat field ############################
        try:
            flat = pyfits.getdata(path_to_flat)
            new_im = (new_im / flat).astype(numpy.int16)
        except Exception, e:
            print e
#################################################################################################

        f_name, f_end = filename.split('.')
        try:
            pyfits.update(path + f_name + '_s.fits', new_im, hdr)
        except Exception, e:
            pyfits.writeto(path + f_name + '_s.fits', new_im, hdr)
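
# A more compact alternative to the neighbor-averaging above (a hedged sketch,
# not part of the original class): replace only the outlier pixels with the
# local median via scipy.ndimage; clean_hot_pixels is an illustrative name.
import numpy
from scipy import ndimage

def clean_hot_pixels(im, nsigma=2.0, size=3):
    """Replace pixels more than nsigma*std above the mean with the local median."""
    threshold = numpy.mean(im) + nsigma * numpy.std(im)
    local_med = ndimage.median_filter(im, size=size)
    out = im.copy()
    hot = im >= threshold
    out[hot] = local_med[hot]
    return out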
Exemplo n.º 24
0
    field, dither, num = obj[1].split()
    # Correct for a typo in the naming.
    if obj[0] == 'c205p08+46p7':
        obj[0] = 'c250p08+46p7'

    # load data and skysubtract
    data = oimg[1].data
    dataHDR = oimg[1].header

    sky = skySubtract(data)

    if not os.path.isfile(obj[0]+'_'+field+'_'+num+'.fits'):
        # rewriting the whole file because that is easy to update
        oimg.writeto(obj[0]+'_'+field+'_'+num+'.fits')
        # update with sky subtraction
        pyf.update(obj[0]+'_'+field+'_'+num+'.fits', data, dataHDR, 1)
    else:
        # Here's the data we are going to add to
        img = pyf.open(obj[0]+'_'+field+'_'+num+'.fits')
        data1 = img[1].data
        dataHDR1 = img[1].header
        try:
            pyf.update(obj[0]+'_'+field+'_'+num+'.fits', data1+data,
                    dataHDR, 1)
        except ValueError:
            print 'Different lengths'
            # Make sure all of the arrays are the same length
            if data.shape[1] > data1.shape[1]:
                sky.pop(-1*(data.shape[1]-data1.shape[1]))
                data = delete(data, -1*(data.shape[1]-data1.shape[1]), 1)
            elif data.shape[1] < data1.shape[1]:
Exemplo n.º 25
0
def findsexobj(file,
               sigma,
               pipevar,
               masksfx=None,
               zeropt=25.0,
               maptype='MAP_WEIGHT',
               wtimage=None,
               fwhm=1.5,
               pix=0.3787,
               aperture=5.0,
               elong_cut=1.5,
               quiet=0):
    """
    NAME:
        findsexobj
    PURPOSE:
        Finds sextractor objects with optional inputs. Estimates seeing from stars found. 
    INPUTS:
    	file    - fits file to run sextractor on
    	sigma   - detection threshold and analysis threshold for sextractor
    	pipevar - pipeline parameters (typically set in autopipedefaults or autoproc)
    	
    OPTIONAL KEYWORDS:
    	masksfx   - text string identifier for sextractor CHECKIMAGE_NAME
    	zeropt    - input value for sextractor MAG_ZEROPOINT
    	wtimage   - file for input for sextractor WEIGHT_IMAGE
    	fwhm      - input value for sextractor SEEING_FWHM
    	pix       - input value for sextractor PIXEL_SCALE
    	aperture  - input value for sextractor PHOT_APERTURES
    	elong_cut - cutoff limit for FWHM calculation of elongation to eliminate non-stars
    	quiet     - no output from sextractor if set
    EXAMPLE:
        findsexobj(file, 3.0, pipevar, aperture=20.0)
    DEPENDENCIES:
        sextractor
    FUTURE IMPROVEMENTS:
        More keywords to sextractor?
    """

    # Move necessary sextractor configuration files if they are not in current directory
    if not os.path.isfile('coadd.param'):
        os.system('cp ' + pipevar['defaultspath'] + '/coadd.param .')
    if not os.path.isfile('coadd.conv'):
        os.system('cp ' + pipevar['defaultspath'] + '/coadd.conv .')
    if not os.path.isfile('coadd.config'):
        os.system('cp ' + pipevar['defaultspath'] + '/coadd.config .')
    if not os.path.isfile('default.nnw'):
        os.system('cp ' + pipevar['defaultspath'] + '/default.nnw .')

    if quiet > 0:
        verbosetype = 'QUIET'
    else:
        verbosetype = 'NORMAL'

    # Run sextractor with given input parameters. Saves temp.cat as
    # starfile, saves starmask, and calculates seeing from starlike objects. Saves
    # necessary parameters to header
    if file == '': return

    if not os.path.isfile(file): return
    starfile = file + '.stars'

    trunfile = os.path.splitext(file)[0]

    sexcmd = pipevar['sexcommand'] + ' -c coadd.config -DETECT_THRESH ' +\
             str(sigma) + ' -ANALYSIS_THRESH ' + str(sigma) + ' -PHOT_APERTURES ' +\
             str(aperture) + ' -MAG_ZEROPOINT ' + str(zeropt) + ' -PIXEL_SCALE ' +\
             str(pix) + ' -SEEING_FWHM ' + str(fwhm) + ' -VERBOSE_TYPE ' +verbosetype

    if masksfx != None:
        mskimg = trunfile + '_' + masksfx + '.fits'
        sexcmd += ' -CHECKIMAGE_TYPE OBJECTS' + ' -CHECKIMAGE_NAME ' + mskimg

    if wtimage != None:
        sexcmd += ' -WEIGHT_TYPE ' + maptype + ' -WEIGHT_IMAGE ' + wtimage + ' '

    sexcmd += ' ' + file
    if quiet == 0: print sexcmd
    os.system(sexcmd)

    if quiet == 0: print 'mv -f test.cat ' + starfile
    os.system('mv -f test.cat ' + starfile)

    num = 0
    # Calculates seeing with starlike objects
    if os.path.isfile(starfile):
        vars = np.loadtxt(starfile, unpack=True)
        num = vars[0, :]
        flag = vars[5, :]
        elon = vars[8, :]
        fwhmim = vars[9, :]
        keep = (flag == 0) & (elon < elong_cut) & (fwhmim > fwhm) & (fwhmim < 20.0)

        if sum(keep) <= 1:
            seepix = None
        else:
            seepix = np.median(fwhmim[keep])
    else:
        print 'Failed to find Sextractor output file!'
        seepix = None

    head = pf.getheader(file)

    if masksfx != None:
        head['MASKNAME'] = (mskimg, "Object mask image from Sextractor")

    head['STARFILE'] = (starfile, "Objects file from Sextractor")
    head['ZEROPT'] = (zeropt, "Photometric zero-point used for Sextractor")
    if seepix != None:
        head['SEEPIX'] = (seepix,
                          "Estimated seeing from Sextractor objects (pix)")
    head['NSTARS'] = (len(num), "Estimated number of objects from Sextractor")

    data = pf.getdata(file)
    pf.update(file, data, head)

    # Removes config files after done
    os.system('rm -f coadd.param')
    os.system('rm -f coadd.conv')
    os.system('rm -f coadd.config')
    os.system('rm -f default.nnw')
Exemplo n.º 26
0
def add_hbeta_sources_LAST(dir, subdir, velfile, otherfile, sfrhistfile,
                           snapnum):
    """Reads Laura's velfile and otherfile and gets the Hbeta
    luminosity for the NLR particles. Then adds these particles as
    pure Hbeta sources to the specified sfrhist file."""

    vpath = dir + '/' + subdir + '/' + velfile
    opath = dir + '/' + subdir + '/' + otherfile

    print dir, subdir, velfile, otherfile
    print vpath
    print opath

    velf = read_hbeta_file(vpath, snapnum)
    otherf = read_hbeta_file(opath, snapnum)

    hbetalum = velf[:, -1]
    print "Total Hbeta luminosity: %g Lsun = %g W" % (sum(hbetalum),
                                                      sum(hbetalum) * 3.83e26)

    print "Reading data for existing sources"
    sys.stdout.flush()
    sf = pyfits.open(sfrhistfile)

    phdu = sf['PARTICLEDATA']

    nold = phdu.data.shape[0]
    nadd = velf.shape[0]

    lam = sf['LAMBDA'].data.field('lambda')
    #pdb.set_trace()
    sed = phdu.data.field('L_lambda')
    pos = phdu.data.field('position')
    vel = phdu.data.field('velocity')
    age = phdu.data.field('age')

    # figure out which BH is which. Laura's file contains the pos of
    # the particle in ref to the BH, so by subtracting those vectors
    # we get - delta pos of the black holes. We want the position in
    # relation to the black hole that's the first column in her files.
    # (Laura's note: last particles in the unmodified fits file are BHs, and
    # have age=nan.)
    bhpart = where(age != age)[0]

    if bhpart.shape[0] == 1:
        # if we only have one then it's easy
        bhpos = pos[bhpart[0], :]
        # check that the other bh has junk
        assert otherf[0, 3] == -1
        nbh = 1
    else:
        bhdeltapos = pos[bhpart[0], :] - pos[bhpart[1], :]
        lbhdeltapos = otherf[0, 0:3] - otherf[0, 3:6]
        #if these vectors are parallel we have the BHs switched
        nbh = 2
        if dot(bhdeltapos, lbhdeltapos) > 0:
            bhpos = pos[bhpart[1], :]
        else:
            bhpos = pos[bhpart[0], :]

    print "Generating new PARTICLEDATA table"
    sys.stdout.flush()

    # create new table hdu. this also copies the data
    hdu = pyfits.new_table(phdu.columns, header=phdu.header, nrows=nold + nadd)

    hbetased = array([make_hbeta_sed(hbetalum[i], lam) for i in range(nadd)])
    print "Integrated Hbeta L_lambda: %g W/m" % (sum(hbetased))

    print "hbetased:", hbetased.shape
    ised_hbeta = array([sum(hbetased[:, i]) for i in range(lam.size)])
    print "ised_hbeta:", ised_hbeta.shape

    # append new data
    hdu.data.field('position')[nold:] = otherf[:, 0:3] + bhpos
    hdu.data.field('velocity')[nold:] = velf[:, 1:4] * 1.0226903e-09
    hdu.data.field('L_lambda')[nold:] = log10(hbetased + 1e-5)
    hdu.data.field('L_bol')[nold:] = velf[:, -1] * 3.83e26

    # calculate particle size from density and mass
    h = sf['gadget'].header['HubbleParam']
    mtomsun = sf['gadget'].header['UnitMass_in_g'] * 5.0273993e-34 / h
    ltokpc = sf['gadget'].header['UnitLength_in_cm'] * 3.2407793e-22 / h

    mass = otherf[:, 8] * mtomsun
    density = otherf[:, 6] * mtomsun / ltokpc**3

    hdu.data.field('radius')[nold:] = (3 * mass / (4 * pi * density))**(1. / 3)

    # fudge up new data
    hdu.data.field('ID')[nold:] = hdu.data.field('ID')[nold - 1] + range(
        1, nadd + 1)
    hdu.data.field('mass')[nold:] = 0
    hdu.data.field('metallicity')[nold:] = 0
    hdu.data.field('formation_time')[nold:] = 0
    hdu.data.field('parent_ID')[nold:] = 0
    hdu.data.field('age')[nold:] = 0
    hdu.data.field('creation_mass')[nold:] = 0.

    # update the integrated sed
    ihdu = sf['INTEGRATED_QUANTITIES']
    ised = ihdu.data.field('L_lambda')
    print "lg_ised", ised.shape, ised.dtype
    print "lg_ised[101]=", ised[101]
    print "ised_hbeta[101]=", ised_hbeta[101]
    ised_new = ised + ised_hbeta + 1e-5
    print "lg_ised_new", ised_new.shape
    print "lg_ised_new[101]", ised_new[101]
    ihdu.data.field('L_lambda')[:] = ised_new

    # NB: keep the pyfits file handle open here; its HDU data are still used
    # below (it is closed after the updates at the end of this function).

    # add keyword with NLR particle info
    hdu.header.update('NNLR',
                      nadd,
                      'number of NLR particles added',
                      after='LOGFLUX')
    hdu.header.update('NLRDIR',
                      subdir,
                      'NLR data subdirectory',
                      after='LOGFLUX')

    print "Updating file %s" % sfrhistfile
    sys.stdout.flush()

    pyfits.update(sfrhistfile, hdu.data, hdu.header, 'PARTICLEDATA')

    pyfits.update(sfrhistfile, ihdu.data, ihdu.header, 'INTEGRATED_QUANTITIES')

    sf.close()
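
# make_hbeta_sed is not shown in this snippet; a minimal sketch under the
# assumption that it returns a narrow Gaussian emission-line L_lambda profile
# over the wavelength grid lam that integrates to the given line luminosity
# (the name and line width below are illustrative, not from the original):
import numpy as np

def make_hbeta_sed_sketch(lum, lam, lam0=4.861e-7, sigma_lam=1e-10):
    profile = np.exp(-0.5 * ((lam - lam0) / sigma_lam) ** 2)
    return lum * profile / np.trapz(profile, lam)  # normalize to lum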
Exemplo n.º 27
0
def imcombine(im_list, output_name, method,  
              lo_sig = 10, hi_sig = 3, overwrite= False):
# Image Combination Script #
# Inputs:
#   im_list = must be a python list of images or "@listfile"
#   output_name = name of combined fits image
#   method = the method to use for combining (median, average, sum)
#   lo_sig = low sigma clipping factor (default = 10 sigma)
#   hi_sig = high sigma clipping factor (default = 3 sigma)
#   overwrite = if true, go ahead and rewrite the existing file 'output_name';
#               if false, it will warn you and ask for a new output_name.
#               (default false)
# Output:
#   After successfully combining, calculating airmass, and writing to a fits
#   file, the return of this function is the name of the combined
#   image (output_name).
    print "\n====================\n" 
    print "Combining Images:"
    print "Using %s of count values." % method 
    print "Sigma Cliping Factors (low, high): (%s, %s)\n" % (lo_sig, hi_sig)
    
    # Read image data and put it in a numpy block # 
    Ni = len(im_list)
    for i in range(0, Ni):
        # First size the array to contain the data based on 1st image #
        # Create block with 3 axis:
        #   axis[0] has length of number of images.
        #   axis[1] is the vertical axis of the chip.
        #   axis[2] is the horizontal axis of the chip.
        if i == 0:  
            img_data = fits.getdata(im_list[i])
            n,Ny,Nx = np.shape(img_data)
            img_block = np.ndarray( shape= (Ni,Ny,Nx) )
            img_block[i,:,:] = img_data
        # Then go ahead and read the rest of the images into the block #   
        else: 
            img_block[i,:,:] = fits.getdata(im_list[i])
        # set nan values to zero # 
        img_block[ np.isnan(img_block) ] = 0
        
    # If Zero Additive Scale Images #
    if 'zero' in im_list[0].lower():
        img_block, Scale = Add_Scale(img_block)
    # If Flats Multiplicative Scale Images #
    elif 'flat' in im_list[0].lower():
        if 'blue' in im_list[0].lower():
            index = 1.
            img_block, Scale = Mult_Scale(img_block, index)
        elif 'red' in im_list[0].lower():
            index = 2.
            img_block, Scale = Mult_Scale(img_block, index)
    # If Not, Dont Scale # 
    else: 
        print "Did Not Scale Images.\n" 
        Scale= np.empty(Ni)
        Scale[:]= np.NaN
    
    # Print Name and Statistics of Each image #
    avgarr,stdarr = np.zeros(Ni), np.zeros(Ni)
    for i in range(0,Ni):
        Avg= np.mean(img_block[i,25:75,1700:1800])
        Std= np.std(img_block[i,25:75,1700:1800])
        avgarr[i] = Avg
        stdarr[i] = Std
        print ( "%02d: %s ScaleValue:% .3f Mean: %.3f StDev: %.3f" 
                % (i, im_list[i], Scale[i], Avg, Std) )
    
    #Save Values to diagnostic array
    try:
        if im_list[0].lower().__contains__("zero"):
            diagnostic[0:len(avgarr),1] = avgarr
            diagnostic[0:len(stdarr),2] = stdarr
        if im_list[0].lower().__contains__("flat"):
            if im_list[0].lower().__contains__("blue"):
                diagnostic[0:len(avgarr),5] = avgarr
                diagnostic[0:len(stdarr),6] = stdarr
            elif im_list[0].lower().__contains__("red"):
                diagnostic[0:len(avgarr),9] = avgarr
                diagnostic[0:len(stdarr),10] = stdarr
    except:
        pass
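    # SigClip is defined elsewhere in this module; a minimal sketch of the
    # iterative rejection it presumably performs (names are assumptions):
    #   def SigClip(counts, lo_sig, hi_sig):
    #       counts = np.asarray(counts)
    #       while True:
    #           m, s = np.mean(counts), np.std(counts)
    #           keep = (counts > m - lo_sig * s) & (counts < m + hi_sig * s)
    #           if keep.all():
    #               return counts
    #           counts = counts[keep]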
    ## Combine the images according to input "method" using SigClip() above ##
    comb_img = np.ndarray( shape= (1,Ny,Nx), dtype='float32')
    ##mask_img = np.ndarray( shape= (1,Ny,Nx), dtype='float32')
    while True: # Continually asks for method if input is weird #
        
        if method == 'median':
            for y in range(0,Ny):
                for x in range(0,Nx):
                    counts = img_block[:,y,x]
                    val = np.median( SigClip(counts, lo_sig, hi_sig) )
                    comb_img[0,y,x] = np.float32(val)
            break # exit while loop 
    
        elif method == 'average':
            for y in range(0,Ny):
                for x in range(0,Nx):
                    counts = img_block[:,y,x]
                    #counts_good, counts_bad = SigClip(counts, lo_sig, hi_sig)
                    val = np.average( SigClip(counts, lo_sig, hi_sig) )
                    #val = np.average(counts_good)
                    comb_img[0,y,x] = np.float32(val)
                    #mask = np.average(counts_bad)
                    #mask_img[0,y,x] = np.float32(mask)
            #mask_image = fits.PrimaryHDU(data=mask_img)
            #mask_image.writeto('Mask.fits')
                                   
            break # exit while loop
        
        elif method == 'sum':
            for y in range(0,Ny):
                for x in range(0,Nx):
                    counts = img_block[:,y,x]
                    val = np.sum( SigClip(counts, lo_sig, hi_sig) )
                    comb_img[0,y,x] = np.float32(val)
            #print img_block[:,100,50]
            #print comb_img[:,100,50]
            break # exit while loop
        
        else:
            # if 'method' input is wanky, ask for method again. 
            print "\nError: Method NOT AVALABLE." 
            print "Available Methods: ('median', 'average', 'sum')"
            print "Enter Valid Method"
            method = raw_input('>>>')
    
    # Set NAN values to zero 
    comb_img[ np.isnan(comb_img) ] = np.float32(0)
    
    ###### Calculate Effective Airmass for combined image ######
    # The EffAM value is writen into the header in the next section #
    print '\nCalculating Effective Airmass:'    
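    # EffectiveAirMass is defined elsewhere; for three sampled airmasses it is
    # presumably the Simpson's-rule weighting commonly used for this:
    #   EffAM = (AM_first + 4.0 * AM_middle + AM_last) / 6.0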
    
    # if we're just combining 2 images #
    if Ni == 2:
        AM0 = SetAirMass(im_list[0])
        AM2 = SetAirMass(im_list[1])
        AM1 = (AM0+AM2)/2
        EffAM = EffectiveAirMass(AM0, AM1, AM2)
        print '\nEffective Airmass of combined image: %5.4f' % EffAM
    # if we're combining an odd number of images #
    elif Ni%2 == 1: 
        images = [ im_list[0], im_list[Ni//2], im_list[-1] ] 
        AM = [ SetAirMass(img) for img in images ] 
        EffAM = EffectiveAirMass( AM[0], AM[1], AM[2] )
        print '\nEffective Airmass of combined image: %5.4f' % EffAM
    # if we're combining an even number of images #
    elif Ni%2 == 0:
        images = [im_list[0], im_list[(Ni//2)-1], im_list[Ni//2], im_list[-1]]
        AM = [ SetAirMass(img) for img in images ]
        EffAM = EffectiveAirMass( AM[0], (AM[1]+AM[2])/2, AM[3])
        print '\nEffective Airmass of combined image: %5.4f' % (EffAM)
    # Otherwise we fail # 
    else:
        print "Eff AirMass calculation failed? This never happens!"
    
    ###### Overwrite Protection loop, just in case ######
    if overwrite == False:
        from os.path import isfile
    
    while overwrite == False: # Outer Loop #  
    # Breaks if file name does not exist or overwrite == True #
        exist = isfile(output_name) # Asks whether the file name exists #
        
        if exist == False: 
            print "\nWriting combined image to fits file",output_name,"..." 
            break # Break out of outer loop and continue writing #
        elif exist == True:
            while True: # Inner Loop # 
            # Breaks if user wishes to overwrite, abort, or gives a new name.
            # The loop also checks new names for existence.
                print "\nFile name",output_name,
                print "already exist do you wish to overwrite?"
                yes_no = raw_input('yes or no ?>>>')
            
                if yes_no == 'no':
                    # If overwrite no: prompt new name or abort # 
                    print"\nEnter new file name or Ctrl-c to Abort "
                    output_name = raw_input('>>>')
                    print "\nNew File Name:= ", output_name
                    break # breaks out of Inner loop only.
                          # Code proceeds to Outer Loop to 
                          # ask whether the new file name exists.
                
                elif yes_no == 'yes':
                    # If overwrite yes: Break Inner Loop and Outer loop # 
                    overwrite = True
                    print "\nOverwriting Image:", output_name
                    break
                
                else: 
                    # If yes_no input is weird, return to Inner loop
                    # to ask question again. 
                    print "\nInput Not Recognized."
                    
    ###### The following part only runs if above while loop is satisfied ######
    
    # Copy header of first image in im_list and fix degree symbol issue. 
    hdulist = fits.open(im_list[0])
    hdu = hdulist[0]
    # This checks the string and deletes the bad keywords from header. 
    Fix_Header(hdu.header)
    
    # Write Effective Airmass into header # 
    hdu.header.set('AIRMASS',np.round(EffAM,6),'Calculated Effective Airmass')

    #Write date of image combination to header #
    hdu.header.set('DATECOMB', datetime.datetime.now().strftime("%Y-%m-%d"), 'Date of Image combination')
    
    # Write the imcombine information into header #
    N = len(im_list)
    for i in range(0,N):
        num = str(i+1).zfill(3)
        key = 'IMCMB'+num
        hdu.header.append( (key, im_list[i]), useblanks= True, bottom= True )
    hdu.header.append( ('NCOMBINE', N), useblanks= True, bottom = True )
    hdu.header.append( ('COMBTYPE', method,'Operation Used to Combine'),
                      useblanks= True, bottom= True )
    
    # Make sure header BITPIX reflects data encoding as float 32 ie: -32
    hdu.header['BITPIX'] = -32
    
    # Write header to new fits file  
    new_file_name= check_file_exist(output_name)
    hdu.writeto(new_file_name, output_verify='warn', clobber= True)
    
    # write combined data to the new fits file #
    fits.update(new_file_name, data=comb_img, header=hdu.header,
                output_verify='warn')
                         
    print ( "\nCombined Image: %s Mean: %.3f StDev: %.3f" 
            % (new_file_name, np.mean(comb_img), np.std(comb_img)) ) 
    return new_file_name
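
# Example call (hedged; file names are illustrative):
#   combined = imcombine(['Zero_1.fits', 'Zero_2.fits', 'Zero_3.fits'],
#                        'Zero.fits', 'median', overwrite=True)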