Example No. 1
 def subtract_stackedbias(self, stackedbias, stackedbias_err):
     sb = fits.open(stackedbias)
     sb_err = fits.open(stackedbias_err)
     self.data -= sb[1].data.T
     #self.data_err = np.sqrt(self.data_err**2.0 + sb_err[1].data.T**2.0)
     # TODO: use real RMS error
     self.data_err = np.sqrt(self.data_err**2.0 + 7.0**2.0)
Example No. 2
	def read_radio_fitsfile(self,mpgrp_fitsfile,ipgrp_fitsfile,radiogti_fitsfile):
		self.mpgrp_fitsfile = mpgrp_fitsfile
		self.ipgrp_fitsfile = ipgrp_fitsfile		
		self.radiogti_fitsfile = radiogti_fitsfile

		if not os.path.exists(self.mpgrp_fitsfile):
			raise FileNotFoundError("{} not found.".format(self.mpgrp_fitsfile))
		try:
			self.mpgrp_hdu = fits.open(self.mpgrp_fitsfile)
		except OSError:
			raise 
		print("{} is successfully loaded.".format(self.mpgrp_fitsfile))

		if not os.path.exists(self.ipgrp_fitsfile):
			raise FileNotFoundError("{} not found.".format(self.ipgrp_fitsfile))
		try:
			self.ipgrp_hdu = fits.open(self.ipgrp_fitsfile)
		except OSError:
			raise 
		print("{} is successfully loaded.".format(self.ipgrp_fitsfile))

		if not os.path.exists(self.radiogti_fitsfile):
			raise FileNotFoundError("{} not found.".format(self.radiogti_fitsfile))
		try:
			self.radiogti_hdu = fits.open(self.radiogti_fitsfile)
		except OSError:
			raise 
		print("{} is successfully loaded.".format(self.radiogti_fitsfile))
Example No. 3
    def __init__(self, filename, ext=0):
        """Return a FITS image and the associated header.

        The image is returned as a numpy array. By default, the first HDU is read.
        A different HDU can be specified with the argument 'ext'. The header of the
        image is modified to remove unnecessary keywords such as HISTORY and COMMENT,
        as well as keywords associated with a 3rd and 4th dimension (e.g. NAXIS3,
        NAXIS4).
        """
        if not isinstance(filename, list):
            img_hdu = fits.open(filename)
            self.data = img_hdu[ext].data
            self.hdr = clean_header(img_hdu[ext].header)
        else:
            img_hdr = []
            img_data = []
            if ext == 0:
                ext = [ext] * len(filename)
            elif len(ext) != len(filename):
                raise IndexError('Length of the extension array must match '
                                 'number of images.')
            for i in range(len(filename)):
                img_hdu = fits.open(filename[i])
                img_data.append(img_hdu[ext[i]].data)
                img_hdr.append(clean_header(img_hdu[ext[i]].header))
            self.data = img_data
            self.hdr = img_hdr
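
A short sketch of both call patterns; the class name `FitsImage` is hypothetical (the snippet does not show it):

# Single file: data/hdr hold one array and one cleaned header
img = FitsImage('image1.fits')
# List of files: per-file extensions, data/hdr become lists
imgs = FitsImage(['image1.fits', 'image2.fits'], ext=[1, 0])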
Example No. 4
def combine(fitsfiles, outputfile, method="average"):
    """Combine a set of FITS files, taking care of header keywords

    :argument fitsfiles: FITS filenames to combine
    :type fitsfiles: tuple
    :argument outputfile: output FITS filename
    :type outputfile: str

    :keyword method: average or sum the images
    :type method: str

    :returns: None

    """

    if method is None:
        return
    N = len(fitsfiles)
    if N == 1:
        shutil.copyfile(fitsfiles[0], outputfile)
        return
    hdulist0 = pyfits.open(fitsfiles[0])
    header0 = hdulist0[0].header
    data = hdulist0[0].data
    freqs = [header0['reffreq']]
    header0.update('orig0', os.path.basename(fitsfiles[0]),
                   'original fitsfile')
    for i, filename in enumerate(fitsfiles[1:]):
        with pyfits.open(filename) as hdulist:
            header = hdulist[0].header
            data += hdulist[0].data
        freqs.append(header['reffreq'])
        header0.update(
            'orig%d' % (i + 1), os.path.basename(filename),
            'original fitsfile')
    if method == "average":
        data /= float(N)

    minfreq, maxfreq = min(freqs), max(freqs)
    hdu = pyfits.PrimaryHDU(data)
    reffreq = (minfreq + maxfreq) / 2
    bandwidth = maxfreq - minfreq
    header0.update('reffreq', reffreq,
                  'reference frequency')
    header0.update('bandwidt', bandwidth,
                  'estimated bandwidth')
    header0.update('FREQ_MIN', minfreq,
                  'minimum frequency')
    header0.update('FREQ_MAX', maxfreq,
                  'maximum frequency')
    # frequencies are stored in WCS coords (dimension 4),
    # but since we've copied those from the first image,
    # they need to be updated
    header0.update('crval4', reffreq)
    header0.update('cdelt4', bandwidth)
    hdu.header = header0
    hdulist = pyfits.HDUList([hdu])
    hdulist.writeto(outputfile)
    hdulist.close()
    hdulist0.close()
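
A hedged usage sketch with hypothetical filenames; each input is assumed to be a single-HDU image of the same shape carrying a 'reffreq' keyword:

combine(('band1.fits', 'band2.fits', 'band3.fits'), 'combined.fits',
        method='average')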
Example No. 5
def create_master_frame(calibration_dir, extension, master_path, nframes):
    """
    Take the median of a series of images to produce a master calibration frame (master dark, bias, ...)

    :param calibration_dir: path to calibration images (fits files)
    :param extension: file extension. Can be 'FTS', 'fits', 'FTS.gz', 'fits.gz'
    :param master_path: output path for the master frame; only written if set
    :param nframes: maximum number of frames to median-combine
    :return: Master frame (2D numpy array), also written to master_path if given.
    """
    file_list = glob.glob(os.path.join(calibration_dir, '*.' + extension))
    hdu = fits.open(file_list[0], ignore_missing_end=True)
    header = hdu[0].header
    naxis1 = header['NAXIS1']
    naxis2 = header['NAXIS2']
    # If the number of files in the directory is smaller than the user input number of frames,
    # need to take the latter.
    nframes = np.min([len(file_list), nframes])
    # numpy image arrays are indexed [row, col] = [NAXIS2, NAXIS1]
    frames = np.zeros([naxis2, naxis1, nframes])

    print('Creating master frame from %d frames...' %nframes)

    for i in range(0, nframes):
        hdu = fits.open(file_list[i], ignore_missing_end=True)
        frames[:,:,i] = hdu[0].data

    master_frame = np.median(frames, 2)

    # Write to disk if path is set for the master frame
    if master_path:
        write_uset_fits(master_frame, header, master_path)
        print('Wrote master into %s ' %master_path)

    return master_frame
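
A usage sketch with hypothetical paths, assuming the directory holds gzipped FTS calibration frames:

master_bias = create_master_frame('/data/calib/bias', 'FTS.gz',
                                  '/data/calib/master_bias.fits', nframes=20)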
Example No. 6
def test_different_keyword_values(file1, file2):
    hdulist1 = fits.open(file1)
    header1 = hdulist1[0].header
    hdulist2 = fits.open(file2)
    header2 = hdulist2[0].header
    diff = HeaderDiff(header1, header2)
    assert not diff.identical
Example No. 7
def set_grism_flat(grism='G141', verbose=True):
    import threedhst.grism_sky as bg
    
    if bg.flat_grism == grism:
        return True
    
    if verbose:
        print('Set flat for grism: %s' % grism)
    
    if grism == 'G141':
        flat_f140 = pyfits.open(IREF+'/uc721143i_pfl.fits')
        #flat_f140 = pyfits.open(IREF+'cosmos_f140w_flat.fits')
        #flat_f140 = pyfits.open(IREF+'/flat_3DHST_F140W_t1_v0.1.fits')
        flat_g141 = pyfits.open(IREF+'/u4m1335mi_pfl.fits')
        flat = flat_g141[1].data[5:1019,5:1019] / flat_f140[1].data[5:1019, 5:1019]
        flat[flat <= 0] = 5
        flat[flat > 5] = 5
        bg.flat = flat
        bg.flat_grism = 'G141'
        bg.flat_direct = flat_f140.filename()
        
    else:
        flat_f105 = pyfits.open(IREF+'/uc72113oi_pfl.fits')
        flat_g102 = pyfits.open(IREF+'/u4m1335li_pfl.fits')
        flat = flat_g102[1].data[5:1019,5:1019] / flat_f105[1].data[5:1019, 5:1019]
        flat[flat <= 0] = 5
        flat[flat > 5] = 5
        bg.flat = flat
        bg.flat_grism = 'G102'
        bg.flat_direct = flat_f105.filename()
        
    return True
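
A minimal call sketch (not in the original); IREF must already point at the directory holding the pixel-flat reference files:

set_grism_flat(grism='G102', verbose=True)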
Example No. 8
def makeImgArray(obs_properties, flist, directory, raw_fname, exp_values):
    
    img_array=pandas.DataFrame.from_items([
        ('expnum',[]), ('refccd',[]), ('refpng',[]), ('refdate',[]), ('reftef',[]),
        ('compimages',[]), ('compexp',[]), ('compccd',[]), ('compdate',[]), ('comptef',[])])
    
    def getkey(item):
        return item[0]
        
    for i in range(0,len(obs_properties)):
        
        refnum=obs_properties['expnum'][i]
        #refccd=obs_properties['ccd'][i]
        refccd=obs_properties['ccd'][i]
        refband=obs_properties['band'][i]
        refdate=obs_properties['date'][i]
        ref_ra=obs_properties['ra'][i]
        ref_dec=obs_properties['dec'][i]
        CompImgs=[]
        expnums=[]
        refpng=''
        reftef=round(float(exp_values[exp_values['expnum']==refnum]['t_eff']),3)
        
        for thumb in list(flist):  # iterate over a copy; flist is mutated below
            
            os.chdir(directory+'/'+raw_fname+'/'+thumb)
            fit_list = glob.glob('*fits')
        
            for f in fit_list:
                h=fits.open(f)
                if h[0].header["EXPNUM"]==refnum and h[0].header['CCDNUM']==refccd:
                    reffit=f
                    refpng=thumb+'/'+reffit[:-5]+'Circ.png'
                    drawObsCircle(reffit,reffit[:-5]+'Circ.png','temp.reg',ref_ra,ref_dec)
                    for g in fit_list:
                        j=fits.open(g)
                        jnum=j[0].header['EXPNUM']
                        if (j[0].header['BAND']==refband and jnum != refnum and not (jnum in expnums)):
                            try:
                                teff=float(exp_values[exp_values['expnum']==jnum]['t_eff'])
                            except Exception:
                                teff=0
                                print(jnum)
                            pnglabel=g[:-5]+'Circ.png'
                            drawObsCircle(g, pnglabel, 'temp.reg',ref_ra, ref_dec)
                            date=str(j[0].header['DATE-OBS'])
                            nicedate=date[0:4]+'/'+date[5:7]+'/'+date[8:10]+' '+date[11:19]
                            CompImgs.append([teff,thumb+'/'+pnglabel, jnum, j[0].header['CCDNUM'], nicedate])
                            expnums+=[jnum]
                        j.close()
                    flist.remove(thumb)
                    break 
                h.close()
                

        
        sortedcomps=sorted(CompImgs, key=getkey, reverse=True)
        
        tempframe=pandas.DataFrame.from_items([
            ('expnum', refnum), ('refccd', refccd), ('refpng', [refpng]),
            ('refdate', [refdate]), ('reftef', [reftef]),
            ('compimages', [[x[1] for x in sortedcomps]]),
            ('compexp', [[x[2] for x in sortedcomps]]),
            ('compccd', [[x[3] for x in sortedcomps]]),
            ('compdate', [[x[4] for x in sortedcomps]]),
            ('comptef', [[round(x[0],3) for x in sortedcomps]])])
        img_array=img_array.append(tempframe, ignore_index=True)
        
    return img_array
Example No. 9
def test_identical_number_keywords_headers(file1, file2):
    hdulist1 = fits.open(file1)
    header1 = hdulist1[0].header
    hdulist2 = fits.open(file2)
    header2 = hdulist2[0].header
    diff = HeaderDiff(header1, header2)
    assert diff.diff_keyword_count[0] == diff.diff_keyword_count[1]
Example No. 10
def getQsoCatalogueAllObjects():
	'''

	'''

	path = '/home/gpfs/manip/mnt0607/bao/hdumasde/Data/Catalogue/'
	listPAth = [path+'QSO_DR7_DR12_EBOSS_2016_01_08.fits',
			path+'DLA_all.fits',
			path+'all_Britt.fits',
			path+'VIPERS.fits',
			path+'QSO_3DHST.fits',
			path+'LOWZ_all.fits',
			path+'CMASS_all.fits']
	name = ['QSO','DLA','Britt','VIPERS','3DHST','LOWZ','CMASS']

	## Distribution redshift
	for i in numpy.arange(len(listPAth)):
		cat = pyfits.open(listPAth[i], memmap=True )[1].data
		cat = cat[ (cat['Z']>0.1) ]
		cat = cat[ (cat['Z']<7.) ]
		if (cat.size==0): continue
		plt.hist(cat['Z'], bins=100,histtype='step',label=name[i])

	plt.xlabel("Z")
	plt.ylabel("#")
	myTools.deal_with_plot(False,False,True)
	plt.show()

	### Merge everything
	cat = pyfits.open(listPAth[0], memmap=True )[1].data
	ra = cat['RA']
	de = cat['DEC']
	zz = cat['Z']
	for i in numpy.arange(1,len(listPAth)):
		cat = pyfits.open(listPAth[i], memmap=True )[1].data
		ra = numpy.append(ra, cat['RA'])
		de = numpy.append(de, cat['DEC'])
		zz = numpy.append(zz, cat['Z'])

	## Map
	plt.ticklabel_format(style='sci', axis='z', scilimits=(0,0))	
	plt.grid()
	plt.plot(ra, de, linestyle="", marker="o")
	plt.xlabel("Right Ascension (degree)")
	plt.ylabel("Declination (degree)")
	plt.show()
	## Distribution redshift
	plt.ticklabel_format(style='sci', axis='z', scilimits=(0,0))
	plt.grid()
	plt.hist(zz, bins=200)
	plt.xlabel("Z")
	plt.ylabel("#")
	plt.show()

	### Save	
	col_ra              = pyfits.Column(name='RA',  format='D', array=ra, unit='deg')
	col_de              = pyfits.Column(name='DEC', format='D', array=de, unit='deg')
	col_zz              = pyfits.Column(name='Z',   format='D', array=zz)
	tbhdu = pyfits.BinTableHDU.from_columns([col_ra, col_de, col_zz])
	tbhdu.writeto('/home/gpfs/manip/mnt0607/bao/hdumasde/Data/Catalogue/ALL_EVERY_OBJECTS_2016_01_08.fits', clobber=True)
Example No. 11
    def set_total_costs_matrix(self, i, j, def_param = None):
        def_param = total_costs_matrix_base
        curve_name_i = all_curves[i][0]
        curve_type_i = all_curves[i][1]
        curve_file_i = fits.open(os.getcwd()+'/memoria/'+
                        'inputs/'+curve_type_i+'/'+curve_name_i+'.fits',
                        memmap=False)

        curve_data_i = Extractor.get_values(curve_file_i)

        curve_file_i.close()

        curve_name_j = all_curves[j][0]
        curve_type_j = all_curves[j][1]
        curve_file_j = fits.open(os.getcwd()+'/memoria/'+
                        'inputs/'+curve_type_j+'/'+curve_name_j+'.fits',
                        memmap=False)

        curve_data_j = Extractor.get_values(curve_file_j)

        curve_file_j.close()

        x,y = curve_data_i, curve_data_j

        dtw = DTW(x,y)
        cost_matrix = dtw.compute_cost_matrix(DTW.euclidean_distance)
        acc_cost_matrix, cost = dtw.compute_acc_cost_matrix(cost_matrix)

        self.total_costs_matrix[i,j] = cost
Example No. 12
    def test_aa(self):
        cmd = """{executable} {specter_dir}/bin/specter \
          -i {sky} \
          -p {monospot_file} \
          -t {throughput_file} \
          -o {imgfile} \
          -w 7500,7620 \
          -n --specmin 0 --nspec 2 --exptime 1500""".format(
            executable=self.executable,
            specter_dir=self.specter_dir,
            sky=self.sky_file,
            monospot_file=self.monospot_file,
            throughput_file=self.throughput_file,
            imgfile = imgfile1)
        print(cmd)
        err = os.system(cmd)
        self.assertEqual(err, 0, 'Error code {} != 0'.format(err))
        self.assertTrue(os.path.exists(imgfile1))

        with fits.open(imgfile1) as fx:
            self.assertIn('CCDIMAGE', fx)
            self.assertIn('IVAR', fx)

        #- Test the I/O routines while we have the file handy
        image, ivar, hdr = read_image(imgfile1)
        self.assertEqual(image.shape, ivar.shape)

        os.remove(imgfile1)
        cmd = cmd + ' --extra'
        err = os.system(cmd)
        self.assertEqual(err, 0, 'Error code {} != 0'.format(err))
        self.assertTrue(os.path.exists(imgfile1))
        with fits.open(imgfile1) as fx:
            self.assertIn('PHOTONS', fx)
            self.assertIn('XYWAVE', fx)
Example No. 13
def mask_satellite_trail(flc='jcdu36e5q_flc.backup.fits', mask_files=['jcdu36e5q_mask_1.reg', 'jcdu36e5q_mask_2.reg']):
    """
    Use Pyregion to mask satellite trails with a region file. 
    
    Note: save the region file in "fk5" sky coordinates
    """
    import astropy.io.fits as pyfits
    import pyregion
    
    ### open the FITS image for modifying
    im = pyfits.open(flc) 
    
    ### Make the mask for both "DQ" extensions for both ACS/WFC chips
    for extension in [1,2]:
        ### Get a mask returning 1 *within* the region and 0 elsewhere
        im['SCI',extension].header.remove('CPDIS1')
        im['SCI',extension].header.remove('CPDIS2')
        reg = pyregion.open(mask_files[extension-1]).as_imagecoord(im['SCI', extension].header)
        mask = reg.get_mask(im['SCI', extension])
        ### Apply the mask to the DQ extension
        im['DQ', extension].data |= 4096*mask
    
    imf = pyfits.open(flc, mode='update')
    for extension in [1,2]:
        imf['DQ', extension].data = im['DQ',extension].data
        
    ### Write the file
    imf.flush()
Example No. 14
def validate_vs_jwpsf_nircam():

    models = [ ('NIRCam','F200W', 'f200w_perfect_offset', '/Users/mperrin/software/jwpsf_v3.0/data/NIRCam/OPD/perfect_opd.fits', 0.034,True),
            ('NIRCam','F200W', 'f200w_perfect', '/Users/mperrin/software/jwpsf_v3.0/data/NIRCam/OPD/perfect_opd.fits', 0.034,False),
            ('NIRCam','F200W', 'f200w', '/Users/mperrin/software/jwpsf_v3.0/data/NIRCam/OPD/nircam_obs_w_rsrv1.fits', 0.034,True),
                ('MIRI','F1000W', 'f1000w', '/Users/mperrin/software/jwpsf_v3.0/data/MIRI/OPD/MIRI_OPDisim1.fits', 0.11,True)]


    fig = P.figure(1, figsize=(13,8.5), dpi=80)
    oversamp=4
    for params in models:

        nc = webbpsf_core.Instrument(params[0])
        nc.filter = params[1]
        nc.pupilopd = params[3] #'/Users/mperrin/software/jwpsf_v3.0/data/NIRCam/OPD/nircam_obs_w_rsrv1.fits'
        nc.pixelscale = params[4] #0.034 # this is wrong, but compute this way to match JWPSF exactly
        if params[5]:
            # offset by half a pixel to match the JWPSF convention
            nc.options['source_offset_r'] = params[4]/2 * N.sqrt(2)/oversamp  # offset half a pixel each in X and Y
            nc.options['source_offset_theta'] = -45


        jw_fn = 'jwpsf_%s_%s.fits' % (params[0].lower(), params[2].lower())
        my_fn = 'test_vs_' + jw_fn

        if not os.path.exists( my_fn):
            my_psf = nc.calcPSF(my_fn, oversample=oversamp, fov_pixels=512./oversamp)
        else:
            my_psf = fits.open(my_fn)

        jw_psf = fits.open(jw_fn)
        jw_psf[0].header.update('PIXELSCL', jw_psf[0].header['CDELT1']*3600)


        P.clf()
        #P.subplots_adjust(top=0.95, bottom=0.05, left=0.01, right=0.99)
        P.subplot(231)
        titlestr = "%s %s, \n"%  (params[0], params[2])
        poppy.display_PSF(my_psf, title=titlestr+"computed with WebbPSF" , colorbar=False)
        P.subplot(232)
        poppy.display_PSF(jw_psf, title=titlestr+"computed with JWPSF" , colorbar=False)
        P.subplot(233)
        poppy.display_PSF_difference(my_psf,jw_psf, title=titlestr+'Difference Image', colorbar=False)

        imagecrop = 30*params[4]

        P.subplot(234)
        poppy.display_PSF(my_psf, title=titlestr+"computed with WebbPSF", colorbar=False, imagecrop=imagecrop)
        centroid = poppy.measure_centroid(my_psf)
        P.gca().set_xlabel("centroid = (%.3f,%.3f)" % centroid)

        P.subplot(235)
        poppy.display_PSF(jw_psf, title=titlestr+"computed with JWPSF", colorbar=False, imagecrop=imagecrop)
        centroid = poppy.measure_centroid(jw_psf)
        P.gca().set_xlabel("centroid = (%.3f,%.3f)" % centroid)

        P.subplot(236)
        poppy.display_PSF_difference(my_psf,jw_psf, title='Difference Image', colorbar=False, imagecrop=imagecrop)

        P.savefig("results_vs_jwpsf_%s_%s.pdf" % (params[0], params[2]))
Example No. 15
def deep_obj_mask(img, ota, apply=False):
    from astropy.io import fits
    from astropy.stats import sigma_clipped_stats
    image = odi.scaledpath+'scaled_'+ota+'.'+str(img[16:])
    ota_mask = 'objmask_'+ota+'.'+str(img[16:17])+'.fits'
    hdulist = fits.open(image)
    hdu_ota = hdulist[0]
    # maskhdu = fits.open(bppath+ota_mask)
    gapshdu = fits.open(odi.bppath+'reproj_mask_'+ota+'.'+str(img[16:]))
    total_mask = gapshdu[0].data
    #maskhdu[0].data + 

    nx, ny = hdu_ota.data.shape
    print(nx, ny)
    # integer division keeps the quadrant slice indices valid under Python 3
    mean1, median1, std1 = sigma_clipped_stats(hdu_ota.data[0:ny//2,0:nx//2], mask=total_mask[0:ny//2,0:nx//2], sigma=3.0, iters=3)
    mean2, median2, std2 = sigma_clipped_stats(hdu_ota.data[0:ny//2,nx//2:nx], mask=total_mask[0:ny//2,nx//2:nx], sigma=3.0, iters=3)
    mean3, median3, std3 = sigma_clipped_stats(hdu_ota.data[ny//2:ny,0:nx//2], mask=total_mask[ny//2:ny,0:nx//2], sigma=3.0, iters=3)
    mean4, median4, std4 = sigma_clipped_stats(hdu_ota.data[ny//2:ny,nx//2:nx], mask=total_mask[ny//2:ny,nx//2:nx], sigma=3.0, iters=3)
    mean = [mean1, mean2, mean3, mean4]
    median = [median1, median2, median3, median4]
    std = [std1, std2, std3, std4]
    # plt.imshow(hdu_ota.data, origin='lower', cmap='Greys_r', vmin=-10., vmax=500.)
    # plt.imshow(total_mask, cmap=random_cmap(), alpha=0.5)
    # plt.show()
    return mean, median, std
Example No. 16
def Downsample(filein,keys=[]):
	"""
	Selects keys from a fits file and produces a new file.
	-Input:
		filein (str): file to read.
		keys (list): a list containing the columnames to keep.
	"""

	hdu1          = fits.open(filein)[1]
	data          = hdu1.data
	columnnames   = hdu1.columns.names
	columnformats = hdu1.columns.formats

	for key_ in keys:
		if not key_ in columnames:
			raise ValueError('The name '+key_+' is not present.')

	newdata = []
	newcol  = []
	newtype = []	
	for key_ in keys:
		newdata.append( data[key_] )
		newcol.append( key_ )
		newtype.append( columnformats[columnnames.index(key_)] )

	columnlist = [fits.Column(name=name_, format=format_, array=array_)
	              for name_, format_, array_ in zip(newcol, newtype, newdata)]

	cols  = fits.ColDefs(columnlist)
	tbhdu = fits.BinTableHDU.from_columns(cols)
	tbhdu.writeto(filein+'_downsample')
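
A minimal sketch, assuming 'catalog.fits' holds a binary table with RA, DEC and Z columns; the trimmed table is written to 'catalog.fits_downsample':

Downsample('catalog.fits', keys=['RA', 'DEC', 'Z'])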
Example No. 17
    def read(cls, filename, er_filename=None, **kwargs):
        """ Reads in a given FITS file and returns a dictionary of new
        EISSpectralCubes. Additional parameters are given to fits.open.

        Parameters
        ----------
        filename: string
            Complete location of the FITS file
        er_filename: string
            Location of the error FITS file
        """
        hdulist = fits.open(name=filename, **kwargs)
        errlist = fits.open(er_filename) if er_filename is not None else None
        header = _clean(hdulist[0].header)
        # TODO: Make sure each cube has a correct wcs.
        wavelengths = [c.name for c in hdulist[1].columns if c.dim is not None]
        data = [hdulist[1].data[wav] for wav in wavelengths]
        errs = [errlist[1].data[wav] if errlist is not None else None
                for wav in wavelengths]
        cubes = []
        for i in range(len(data)):
            window = i + 1
            header = _dictionarize_header(hdulist[1].header, hdulist[0].header,
                                          window)
            wcs = WCS(header=header, naxis=3)
            uncertainty = sdu(errs[i]) if errlist is not None else None
            cubes += [EISCube(data[i], wcs, header, errors=uncertainty)]
        return dict(zip(wavelengths, cubes))
Example No. 18
def FITS_to_sinos(fitsdir,slice_frac,sample_freq):
    """
    Takes a path to the directory of FITS files, assumed only having files (no
    directories) and an 2-length array (read as 1d) of the first and last
    vertical portions to store. These are read as fractions of the whole
    (for example, [.1,.9] will read  all data between 10% and 90%--only includ-
    ing the file at 10% and not the one at 90%--of the vertical). We sample the
    files with frequency sample_freq, so downsampling along the
    After reading this data from the FITS files, sinograms are formatted.
    SUudirectories are ignored.
    We assume all files have the same shaped images and that fitsdir DOES
    end with a '/'. Also, assume that slicefrac[1]>=slicefrac[0]
    """
    from astropy.io import fits
    import numpy as np
    import glob
    #Get list of files
    files = glob.glob(fitsdir+'*.fits')

    # On macOS, hidden files may need to be removed from this list first; not handled here
    hdulist = fits.open(files[0],mode = 'readonly')
    data = hdulist[0].data
    #Determine slices which we store
    ind1 = np.round(slice_frac[0]*data.shape[1]).astype(int)
    ind2 = np.round(slice_frac[1]*data.shape[1]).astype(int)
    if ind1==ind2:
        indices = np.array([ind1])
    else:
        indices = np.arange(ind1,ind2,1)
    file_ind = range(0,len(files),sample_freq)
    sinos = np.empty([indices.size,data.shape[0],len(file_ind)])
    for i in range(len(file_ind)):
        hdulist = fits.open(files[file_ind[i]])
        sinos[:,:,i]= hdulist[0].data[indices,:]
    sinos = np.transpose(sinos,(1,2,0))
    return sinos
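
A usage sketch with hypothetical values: keep the vertical slices between 10% and 90% and read every second file in the directory (note the trailing '/'):

sinos = FITS_to_sinos('/data/scan/', [0.1, 0.9], 2)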
Example No. 19
def Mask(filecat=None,filemask=[],maskname=[]):
	"""
	Given a FITS file, appends the mask values as new columns to the table and writes a new file.
	-Input:
		filecat (str): The name of the file with the catalog.
		filemask (list): List of the names of the file with the mask.
		maskname (list): List of the names of the new field
	"""

	if len(filemask) == 0:
		raise ValueError('No mask given.')
	if not len(filemask) == len(maskname):
		raise ValueError('The number of files and headers does not match.')

	hdu1          = fits.open(filecat)[1]
	catalog       = hdu1.data
	columnnames   = hdu1.columns.names
	columnformats = hdu1.columns.formats

	masklist = []
	for file_ in filemask:
		masklist.append( GetMaskArray(file_,catalog['ra'],catalog['dec']) )


	columns  = [ catalog[col_] for col_ in columnnames ]
	columns       += masklist
	columnnames   += maskname
	columnformats += [ 'E' for name_ in maskname ]

	columnlist = [fits.Column(name=name_, format=format_, array=array_)
	              for name_, format_, array_ in zip(columnnames, columnformats, columns)]

	cols  = fits.ColDefs(columnlist)
	tbhdu = fits.BinTableHDU.from_columns(cols)
	tbhdu.writeto(filecat+'_'.join(maskname))
Example No. 20
def gaia(filename,estimator='max',axis=0):
    # Assumes numpy-style names (where, isnan, nanmedian) are in scope, e.g.
    # imported via pylab, and that MAD and mapplot are defined elsewhere.
    f = pyfits.open(filename)
    hdr = f[0].header
    cube = f[0].data
    dv,v0,p3 = hdr.get('CD3_3'),hdr.get('CRVAL3'),hdr.get('CRPIX3')
    dr,r0,p1 = hdr.get('CD1_1'),hdr.get('CRVAL1'),hdr.get('CRPIX1')
    dd,d0,p2 = hdr.get('CD2_2'),hdr.get('CRVAL2'),hdr.get('CRPIX2')
    if dv is None: dv = hdr.get('CDELT3')
    if dr is None: dr = hdr.get('CDELT1')
    if dd is None: dd = hdr.get('CDELT2')
    xtora = lambda x: (x-p1+1)*dr+r0    # convert pixel coordinates to RA/Dec/Velocity
    ytodec = lambda y: (y-p2+1)*dd+d0
    vconv = lambda v: (v-p3+1)*dv+v0

    if axis > 0:
        cube = cube.swapaxes(0,axis)

    if estimator == 'max':
        p = where(isnan(cube),0,cube).max(axis=0)
    elif estimator == 'int':
        p = where(isnan(cube),0,cube).sum(axis=0) * dv
    elif estimator == 'intdivmax':
        cut = MAD(cube.ravel()) + nanmedian(cube.ravel())
        if cut < 0:
            cut = 0
        m = where(isnan(cube),0,cube).max(axis=0)
        i = where(isnan(cube),0,cube).sum(axis=0) * dv
        p = where(i<0,0,i)/where(m<=cut,numpy.inf,m)
    elif estimator[-5:] == ".fits":
        p = pyfits.open(estimator)[0].data

    mapplot(p,cube,vconv,xtora,ytodec)
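
A call sketch with a hypothetical cube filename; the 'int' estimator integrates along the spectral axis scaled by the channel width, and MAD/mapplot must be available as noted above:

gaia('cube.fits', estimator='int', axis=0)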
Example No. 21
def cross_correlation_HST_diff_NDR():
	# Cross correlated the differential non-destructive reads from HST Scanning mode with WFC3 and G141
	import image_registration as ir
	from glob import glob
	from pylab import *;ion()
	from astropy.io import fits

	fitsfiles = glob("*ima*fits")

	ylow   = 50
	yhigh  = 90
	nExts  = 36
	extGap = 5
	shifts_ndr = zeros((len(fitsfiles), (nExts-1)//extGap, 2))
	fitsfile0 = fits.open(fitsfiles[0])
	for kf, fitsfilenow in enumerate(fitsfiles):
		fitsnow = fits.open(fitsfilenow)
		for kndr in range(extGap+1,nExts+1)[::extGap][::-1]:
			fits0_dndrnow = fitsfile0[kndr-extGap].data[ylow:yhigh] - fitsfile0[kndr].data[ylow:yhigh]
			fitsN_dndrnow = fitsnow[kndr-extGap].data[ylow:yhigh] - fitsnow[kndr].data[ylow:yhigh]
			shifts_ndr[kf][(kndr-1)//extGap-1] = ir.chi2_shift(fits0_dndrnow, fitsN_dndrnow)[:2]
			#ax.clear()
			#plt.imshow(fitsN_dndrnow)
			#ax.set_aspect('auto')
			#plt.pause(1e-3)

	plot(shifts_ndr[:,:-1,0],'o') # x-shifts 
	plot(shifts_ndr[:,:-1,1],'o') # y-shifts
Example No. 22
    def test_iteration_of_lazy_loaded_hdulist(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/5585
        """
        hdulist = fits.HDUList(fits.PrimaryHDU())
        hdulist.append(fits.ImageHDU(name='SCI'))
        hdulist.append(fits.ImageHDU(name='SCI'))
        hdulist.append(fits.ImageHDU(name='nada'))
        hdulist.append(fits.ImageHDU(name='SCI'))

        filename = self.temp('many_extension.fits')
        hdulist.writeto(filename)
        f = fits.open(filename)

        # Check that all extensions are read if f is not sliced
        all_exts = [ext for ext in f]
        assert len(all_exts) == 5

        # Reload the file to ensure we are still lazy loading
        f.close()
        f = fits.open(filename)

        # Try a simple slice with no conditional on the ext. This is essentially
        # the reported failure.
        all_exts_but_zero = [ext for ext in f[1:]]
        assert len(all_exts_but_zero) == 4

        # Reload the file to ensure we are still lazy loading
        f.close()
        f = fits.open(filename)

        # Check whether behavior is proper if the upper end of the slice is not
        # omitted.
        read_exts = [ext for ext in f[1:4] if ext.header['EXTNAME'] == 'SCI']
        assert len(read_exts) == 2
Example No. 23
    def test_update_resized_header2(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/150

        This is similar to test_update_resized_header, but specifically tests a
        case of multiple consecutive flush() calls on the same HDUList object,
        where each flush() requires a resize.
        """

        data1 = np.arange(100)
        data2 = np.arange(100) + 100
        phdu = fits.PrimaryHDU(data=data1)
        hdu = fits.ImageHDU(data=data2)

        phdu.writeto(self.temp('temp.fits'))

        with fits.open(self.temp('temp.fits'), mode='append') as hdul:
            hdul.append(hdu)

        with fits.open(self.temp('temp.fits'), mode='update') as hdul:
            idx = 1
            while len(str(hdul[0].header)) <= 2880 * 2:
                hdul[0].header['TEST{}'.format(idx)] = idx
                idx += 1
            hdul.flush()
            hdul.append(hdu)

        with fits.open(self.temp('temp.fits')) as hdul:
            assert (hdul[0].data == data1).all()
            assert hdul[1].header == hdu.header
            assert (hdul[1].data == data2).all()
            assert (hdul[2].data == data2).all()
Example No. 24
    def read(cls, phafile, rmffile=None):
        """Read PHA fits file

        The energy binning is not contained in the PHA standard. Therefore it
        is inferred from the corresponding RMF EBOUNDS extension.

        Parameters
        ----------
        phafile : str
            PHA file with ``SPECTRUM`` extension
        rmffile : str
            RMF file with ``EBOUNDS`` extension, optional
        """
        phafile = make_path(phafile)
        spectrum = fits.open(str(phafile))["SPECTRUM"]
        counts = [val[1] for val in spectrum.data]
        if rmffile is None:
            val = spectrum.header["RESPFILE"]
            if val == "":
                raise ValueError("RMF file not set in PHA header. " "Please provide RMF file for energy binning")
            parts = phafile.parts[:-1]
            rmffile = Path.cwd()
            for part in parts:
                rmffile = rmffile.joinpath(part)
            rmffile = rmffile.joinpath(val)

        rmffile = make_path(rmffile)
        ebounds = fits.open(str(rmffile))["EBOUNDS"]
        bins = EnergyBounds.from_ebounds(ebounds)
        livetime = Quantity(0, "s")
        return cls(counts, bins, livetime=livetime)
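
A hedged sketch, assuming `read` is a classmethod on a spectrum class (called `PHASpectrum` here; the real class name is not shown) and the RMF is passed explicitly:

spec = PHASpectrum.read('pha_obs1.fits', rmffile='rmf_obs1.fits')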
Example No. 25
def fits_osiris_allclose(a, b):
    """Assert that two OSIRIS fits files are close."""
    
    a = fits.open(a)
    b = fits.open(b)
    
    try:
        del a[0].header['COMMENT']
        del b[0].header['COMMENT']
    
        report = StringIO()
        diff = FITSDiff(
            a, b,
            ignore_keywords=["COMMENT"],
            ignore_comments=["SIMPLE"],
            ignore_fields=[],
            ignore_blanks=True,
            ignore_blank_cards=True,
            tolerance=1e-5)
        diff.report(fileobj=report)
        assert diff.identical, report.getvalue()
    
    finally:
        a.close()
        b.close()
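
A usage sketch with hypothetical paths, e.g. inside a pytest test:

fits_osiris_allclose('reduced/frame_a.fits', 'expected/frame_a.fits')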
Example No. 26
    def test_parnames_round_trip(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/130

        Ensures that opening a random groups file in update mode or writing it
        to a new file does not cause any change to the parameter names.
        """

        # Because this test tries to update the random_groups.fits file, let's
        # make a copy of it first (so that the file doesn't actually get
        # modified in the off chance that the test fails
        self.copy_file('random_groups.fits')

        parameters = ['UU', 'VV', 'WW', 'BASELINE', 'DATE']
        with fits.open(self.temp('random_groups.fits'), mode='update') as h:
            assert h[0].parnames == parameters
            h.flush()
        # Open again just in read-only mode to ensure the parnames didn't
        # change
        with fits.open(self.temp('random_groups.fits')) as h:
            assert h[0].parnames == parameters
            h.writeto(self.temp('test.fits'))

        with fits.open(self.temp('test.fits')) as h:
            assert h[0].parnames == parameters
Example No. 27
def test_translated_map():
    """
    Map a pixel array to a translated array.
    """
    first_file = os.path.join(DATA_DIR, 'input1.fits')
    first_hdu = fits.open(first_file)
    first_header = first_hdu[1].header

    first_wcs = wcs.WCS(first_header)
    naxis1, naxis2 = first_wcs.pixel_shape
    first_hdu.close()

    second_file = os.path.join(DATA_DIR, 'input3.fits')
    second_hdu = fits.open(second_file)
    second_header = second_hdu[1].header

    second_wcs = wcs.WCS(second_header)
    second_hdu.close()

    ok_pixmap = np.indices((naxis1, naxis2), dtype='float32') - 2.0
    ok_pixmap = ok_pixmap.transpose()

    pixmap = calc_pixmap.calc_pixmap(first_wcs, second_wcs)
    npt.assert_equal(pixmap.shape, ok_pixmap.shape) # Got x-y transpose right
    npt.assert_almost_equal(pixmap, ok_pixmap, decimal=5) # Mapping an array to a translated array
Example No. 28
    def test_save_backup(self):
        """Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/121

        Save backup of file before flushing changes.
        """

        self.copy_file('scale.fits')

        with ignore_warnings():
            with fits.open(self.temp('scale.fits'), mode='update',
                           save_backup=True) as hdul:
                # Make some changes to the original file to force its header
                # and data to be rewritten
                hdul[0].header['TEST'] = 'TEST'
                hdul[0].data[0] = 0

        assert os.path.exists(self.temp('scale.fits.bak'))
        with fits.open(self.data('scale.fits'),
                       do_not_scale_image_data=True) as hdul1:
            with fits.open(self.temp('scale.fits.bak'),
                           do_not_scale_image_data=True) as hdul2:
                assert hdul1[0].header == hdul2[0].header
                assert (hdul1[0].data == hdul2[0].data).all()

        with ignore_warnings():
            with fits.open(self.temp('scale.fits'), mode='update',
                           save_backup=True) as hdul:
                # One more time to see if multiple backups are made
                hdul[0].header['TEST2'] = 'TEST'
                hdul[0].data[0] = 1

        assert os.path.exists(self.temp('scale.fits.bak'))
        assert os.path.exists(self.temp('scale.fits.bak.1'))
Example No. 29
def genFitsMeans(cubeName,procDir='/data/VIMS/covims_0004/procdata'):
    log={}
    cwd=os.getcwd()

    cube=cubeName.split('/')[1].split('.')[0]
    opDir=procDir+'/'+cubeName.split('/')[0]

    os.chdir(opDir)

    try:
        with pyf.open('c'+cube+'_ir.fits') as irHan:
            ir=irHan[0].data
            mean=num.mean(num.concatenate([ir[:34,:,:],ir[82:105]]),axis=0)
            print(mean.shape)
            HDU=pyf.PrimaryHDU(mean)
            hdu_list=pyf.HDUList([HDU])
            if os.path.isfile('c'+cube+'_ir_mean.fits'):
                os.remove('c'+cube+'_ir_mean.fits')
            hdu_list.writeto('c'+cube+'_ir_mean.fits')
    except Exception: pass
    try:
        with pyf.open('c'+cube+'_vis.fits') as visHan:
            vis=visHan[0].data
            mean=num.mean(vis[:100,:,:],axis=0)
            HDU=pyf.PrimaryHDU(mean)
            hdu_list=pyf.HDUList([HDU])
            if os.path.isfile('c'+cube+'_vis_mean.fits'):
                os.remove('c'+cube+'_vis_mean.fits')
            hdu_list.writeto('c'+cube+'_vis_mean.fits')
    except Exception: pass
                         


    os.chdir(cwd)
Example No. 30
def main():
    sv = fits.open(home_dir + "DES/data/sva1_gold_detmodel_gals.fits")[1].data
    hi = fits.open(home_dir + "DES/magnification/lbgselect/hi-z_mag_probability.fits")[1].data
    hi_test = fits.open(home_dir + "DES/magnification/lbgselect/hi-z_mag_probability_test.fits")[1].data
    lo = fits.open(home_dir + "DES/magnification/lbgselect/lo-z_mag_probability_test.fits")[1].data

    hi.sort(), sv.sort(), lo.sort(), hi_test.sort()

    svid, rid = match(sv["coadd_objects_id"], hi["coadd_objects_id"])

    ri = sv["mag_detmodel_r"][svid] - sv["mag_detmodel_i"][svid]
    gr = sv["mag_detmodel_g"][svid] - sv["mag_detmodel_r"][svid]

    Phi = hi["hi-z_prob"][rid] / (hi["hi-z_prob"][rid] + lo["lo-z_prob"][rid])
    Plo = lo["lo-z_prob"][rid] / (hi["hi-z_prob"][rid] + lo["lo-z_prob"][rid])

    print "P(hi-z)>=0.8: {}/{}".format(len(Phi[Phi >= 0.8]), len(Phi))
    print "P(hi-z)==P(hi-z)_test? {}".format(np.all(hi["hi-z_prob"] == hi_test["hi-z_prob"]))

    #    plt.scatter(Plo, Phi, edgecolor='none')
    #    plt.ylim(0.8, 1.0)
    #    plt.xlabel('P(lo-z)')
    #    plt.ylabel('P(hi-z)')
    #    plt.savefig(home_dir+'DES/magnification/lbgselect/Phi-vs-Plo')
    #    plt.close()

    plt.scatter(np.arange(len(hi["hi-z_prob"])), hi["hi-z_prob"] - hi_test["hi-z_prob"], edgecolor="none")
    plt.ylabel("old code - new code")
    plt.savefig(home_dir + "DES/magnification/lbgselect/oldhi-vs_newhi")
    plt.close()
Example No. 31
# Two command-line arguments:  Name of a file containing a list of the single-sensor
# FITS images and the name of the output file

# Merge a set of single CCD images into a (large) FITS file
# assumed to be in S00, S01, ... order

import astropy.io.fits as fits
import sys

# Read the list of files
io = open(sys.argv[1], "r")
infiles = []
for line in io:
    infiles.append(line.rstrip())
io.close()

hdulist = fits.HDUList()

# Take primary header from the first file in the list
hdu = fits.open(infiles[0])
hdulist.append(hdu[0])

for infile in infiles:
    hdu = fits.open(infile)
    for i in range(1, 17):
        hdulist.append(hdu[i])

hdulist.writeto(sys.argv[2])
Example No. 32
def subtract_sky_background(in_files,
                            out_files=None,
                            method='scalar',
                            source='sky',
                            mask_sources=False,
                            file_path=None,
                            tmp_dir=None,
                            show=False,
                            debug=False):
    """Estimate and subtract the sky background via different methods and sources.

    TODO: Implement sky subtraction from image

    Args:
        in_files (specklepy.FileArchive):
            File archive storing the information of all the files in the reduction.
        out_files (list):
            List of files to apply the sky subtraction to. If left empty, the list stored in the `in_files` FileArchive
            is used.
        method (str, optional):
            Switch between a scalar (`scalar`) background value or a 2D image (`images`).
        source (str, optional):
            Source for estimating the background from. Can be either `sky` to measure from dedicated sky frames or
            `science` to use the science frames themselves. Typically, these frames have a high number of sources, so
            `mask_sources` should be switched on.
        mask_sources (bool, optional):
            In empty reference fields, this masking option should stay at `False`, since source masking is not well
            tested. However, masking sources yields a more precise result.
        file_path (str, optional):
            Path to the files, listed in `in_files`.
        tmp_dir (str, optional):
            Directory to which temporary results and QA data is stored.
        show (bool, optional):
            Show plots of sky estimates for each sequence. They will be created and stored regardless of this choice.
        debug (bool, optional):
            Show debugging information.
    """

    # Set logging level
    if debug:
        logger.setLevel('DEBUG')

    # Apply fall back values
    if method is None:
        method = 'scalar'
    logger.info(f"Sky background subtraction method: {method}")
    if source is None:
        source = 'sky'
    logger.info(f"Sky background subtraction source: {source}")
    if out_files is None:
        out_files = in_files.product_files
    if out_files is None:
        logger.warning(
            "Output files are not declared in subtract_sky_background!")

    # Identify the observing sequences
    sequences = in_files.identify_sequences(source=source)

    # Start the background estimates
    if method == 'scalar':

        # Iterate through observing sequences
        for s, sequence in enumerate(sequences):

            logger.info(
                f"Starting observing sequence {s} :: Object {sequence.object} :: Setup {sequence.setup}"
            )

            # Compute weights based on the time offset to the individual sky observations
            weights = sequence.compute_weights()

            # Start extracting sky fluxes
            sky_bkg = np.zeros(sequence.n_sky)
            sky_bkg_std = np.zeros(sequence.n_sky)
            for i in trange(sequence.n_sky,
                            desc='Estimate sky background from cube'):
                file = sequence.sky_files[i]
                bkg, d_bkg = estimate_sky_background(file,
                                                     method=method,
                                                     mask_sources=mask_sources,
                                                     path=file_path)
                sky_bkg[i] = bkg
                sky_bkg_std[i] = d_bkg
            logger.debug(
                f"Shapes:\nF: {sky_bkg.shape}\ndF: {sky_bkg_std.shape}")

            # Compute weighted sky background for each science file
            weighted_sky_bkg = np.dot(weights, sky_bkg)
            weighted_sky_bkg_var = np.dot(np.square(weights),
                                          np.square(sky_bkg_std))

            # Store sky background estimates
            sky_bkg_table = Table(data=[
                sequence.sky_files, weighted_sky_bkg, weighted_sky_bkg_var
            ],
                                  names=['FILE', 'BKG', 'VAR'])
            sky_bkg_table_name = f"sky_bkg_{sequence.object}_{sequence.setup}.fits"
            sky_bkg_table.write(os.path.join(tmp_dir, sky_bkg_table_name),
                                overwrite=True)

            # Plot sky flux estimates
            for i, file in enumerate(sequence.sky_files):
                plt.text(sequence.sky_time_stamps[i],
                         sky_bkg[i],
                         file,
                         rotation=90,
                         alpha=.5)
            for i, file in enumerate(sequence.science_files):
                plt.text(sequence.science_time_stamps[i],
                         weighted_sky_bkg[i],
                         file,
                         rotation=90,
                         alpha=.66)
            plt.errorbar(x=sequence.sky_time_stamps,
                         y=sky_bkg,
                         yerr=sky_bkg_std,
                         fmt='None',
                         ecolor='tab:blue',
                         alpha=.5)
            plt.plot(sequence.sky_time_stamps,
                     sky_bkg,
                     'D',
                     label='Sky',
                     c='tab:blue')
            plt.errorbar(x=sequence.science_time_stamps,
                         y=weighted_sky_bkg,
                         yerr=np.sqrt(weighted_sky_bkg_var),
                         fmt='None',
                         ecolor='tab:orange',
                         alpha=.66)
            plt.plot(sequence.science_time_stamps,
                     weighted_sky_bkg,
                     'D',
                     label='Science',
                     c='tab:orange')
            plt.xlabel('Time (s)')
            plt.ylabel('Flux (counts)')
            plt.legend()
            save_figure(
                os.path.join(tmp_dir,
                             sky_bkg_table_name.replace('.fits', '.png')))
            if show:
                plt.show()
            plt.close()

            # Subtract sky from product files
            for i, science_file in enumerate(sequence.science_files):
                for out_file in out_files:
                    if science_file in out_file:
                        science_file = out_file
                logger.info(
                    f"Applying sky background subtraction on file {science_file}"
                )
                with fits.open(science_file, mode='update') as hdu_list:
                    hdu_list[0].data = hdu_list[0].data.astype(
                        float) - weighted_sky_bkg[i]
                    if 'VAR' in hdu_list:
                        hdu_list['VAR'].data = hdu_list[
                            'VAR'].data + weighted_sky_bkg_var[i]
                    else:
                        # Construct new HDU
                        shape = np.array(hdu_list[0].data.shape)[[-2, -1]]
                        data = np.full(shape=shape,
                                       fill_value=weighted_sky_bkg_var[i])
                        hdu = fits.ImageHDU(data=data, name='VAR')
                        hdu_list.append(hdu)
                    hdu_list[0].header.set('SKYCORR', str(datetime.now()))
                    hdu_list[0].header.set('SKYBKG', weighted_sky_bkg[i],
                                           "Sky background")
                    hdu_list[0].header.set('SKYVAR', weighted_sky_bkg_var[i],
                                           "Sky background variance")
                    hdu_list.flush()

    elif method in ['image', 'frame']:
        raise NotImplementedError(
            "Sky subtraction in image mode is not implemented yet!")

    else:
        raise ValueError(f"Sky subtraction method {method} is not understood!")
Example No. 33
from astropy.io import fits
import numpy as np
from astropy.table import Table
from collections import OrderedDict

data = fits.open("catalogue.fits")
snData = fits.open("../line_fluxes/sn_R1000_lines_combined.fits")

lineArr = ["C4_1548","C4_1551","O3_1661","O3_1666","C3_1907_C3_1910","O2_3726_O2_3729","Ne3_3869","O3_4363","HBaB_4861","O3_4959","O3_5007","HBaA_6563","N2_6584","S2_6716","S2_6731"]

eMPTout1 = np.genfromtxt("output_dither_MDC_00.txt", dtype=None, names=True)
eMPTout2 = np.genfromtxt("output_dither_MDC_01.txt", dtype=None, names=True)
eMPTout3 = np.genfromtxt("output_dither_MDC_02.txt", dtype=None, names=True)
outputDict = OrderedDict()
outputDict["ID"] = snData[1].data["ID"]
id_for_peter = np.zeros_like(snData[1].data["ID"])-1
for i in range(len(id_for_peter)):
  tempIdx = np.where(eMPTout1["ID_cat"] == snData[1].data["ID"][i])[0]
  if len(tempIdx) > 0:
    id_for_peter[i] = eMPTout1["ID_emma"][tempIdx]-1
  else:
    tempIdx = np.where(eMPTout2["ID_cat"] == snData[1].data["ID"][i])[0]
    if len(tempIdx) > 0:
      id_for_peter[i] = eMPTout2["ID_emma"][tempIdx]-1
    else:
      tempIdx = np.where(eMPTout3["ID_cat"] == snData[1].data["ID"][i])[0]
      if len(tempIdx) > 0:
        id_for_peter[i] = eMPTout3["ID_emma"][tempIdx]-1
outputDict["ID_for_peter"] = id_for_peter
outputDict["redshift"] = data['GALAXY PROPERTIES'].data['redshift']
Example No. 34
def buildEmptyDRZ(input, output):
    """
    Create an empty DRZ file.

    This module creates an empty DRZ file in a valid FITS format so that the HST
    pipeline can handle the Multidrizzle zero exposure time exception
    where all data has been excluded from processing.

    Parameters
    ----------
    input : str
        filename of the initial input to process_input
    output : str
        filename of the default empty _drz.fits file to be generated

    """

    # Identify the first input image
    inputfile = parseinput.parseinput(input)[0]
    if not inputfile:
        print('\n******* ERROR *******', file=sys.stderr)
        print(
              'No input file found!  Check specification of parameter '
              '"input". ', file=sys.stderr)
        print('Quitting...',  file=sys.stderr)
        print('******* ***** *******\n',  file=sys.stderr)
        return # raise IOError, "No input file found!"

    # Set up output file here...
    if output is None:
        if len(input) == 1:
            oname = fileutil.buildNewRootname(input[0])
        else:
            oname = 'final'
        output = fileutil.buildNewRootname(oname, extn='_drz.fits')
    else:
        if '_drz' not in output:
            output = fileutil.buildNewRootname(output, extn='_drz.fits')

    print('Building empty DRZ file with output name: %s' % output)

    # Open the first image (of the excludedFileList?) to use as a template to build
    # the DRZ file.
    try :
        log.info('Building empty DRZ file from %s' % inputfile[0])
        img = fits.open(inputfile[0], memmap=False)
    except Exception:
        raise IOError('Unable to open file %s \n' % inputfile)

    # Create the fitsobject
    fitsobj = fits.HDUList()
    # Copy the primary header
    hdu = img[0].copy()
    fitsobj.append(hdu)

    # Modify the 'NEXTEND' keyword of the primary header to 3 for the
    #'sci, wht, and ctx' extensions of the newly created file.
    fitsobj[0].header['NEXTEND'] = 3

    # Create the 'SCI' extension
    hdu = fits.ImageHDU(header=img['sci', 1].header.copy())
    hdu.header['EXTNAME'] = 'SCI'
    fitsobj.append(hdu)

    # Create the 'WHT' extension
    hdu = fits.ImageHDU(header=img['sci', 1].header.copy())
    hdu.header['EXTNAME'] = 'WHT'
    fitsobj.append(hdu)

    # Create the 'CTX' extension
    hdu = fits.ImageHDU(header=img['sci', 1].header.copy())
    hdu.header['EXTNAME'] = 'CTX'
    fitsobj.append(hdu)

    # Add HISTORY comments explaining the creation of this file.
    fitsobj[0].header.add_history("** AstroDrizzle has created this empty "
                                  "DRZ product because**")
    fitsobj[0].header.add_history("** all input images were excluded from "
                                  "processing.**")


    # Change the filename in the primary header to reflect the name of the output
    # filename.
    fitsobj[0].header['FILENAME'] = str(output)  # +"_drz.fits"

    # Change the ROOTNAME keyword to the ROOTNAME of the output PRODUCT
    fitsobj[0].header['ROOTNAME'] = str(output.split('_drz.fits')[0])
    # Modify the ASN_MTYP keyword to contain "PROD-DTH" so it can be properly
    # ingested into the archive catalog.

    # stis has this keyword in the [1] header, so I am directing the code
    # to first look in the primary header, then the [1] header
    try:
        fitsobj[0].header['ASN_MTYP'] = 'PROD-DTH'
    except Exception:
        fitsobj[1].header['ASN_MTYP'] = 'PROD-DTH'

    # If the file is already on disk delete it and replace it with the
    # new file
    dirfiles = os.listdir(os.curdir)
    if dirfiles.count(output) > 0:
        os.remove(output)
        log.info("       Replacing %s..." % output)

    # Write out the empty DRZ file
    fitsobj.writeto(output)

    print(textutil.textbox(
        'ERROR:\nAstroDrizzle has created an empty DRZ product because all '
        'input images were excluded from processing or a user requested the '
        'program to stop.') + '\n', file=sys.stderr)

    return
Example No. 35
 def load_allstar(self):
     self.apogee_dr = apogee_default_dr(dr=self.apogee_dr)
     allstarpath = allstar(dr=self.apogee_dr)
     hdulist = fits.open(allstarpath)
     print(f'Loading allStar DR{self.apogee_dr} catalog')
     return hdulist
Example No. 36
    def compile(self):
        h5name_check(self.filename)

        hdulist = self.load_allstar()
        indices = self.filter_apogeeid_list(hdulist)

        info = chips_pix_info(dr=self.apogee_dr)
        total_pix = (info[1] - info[0]) + (info[3] - info[2]) + (info[5] -
                                                                 info[4])
        default_length = 900000

        spec = np.zeros((default_length, total_pix), dtype=np.float32)
        spec_err = np.zeros((default_length, total_pix), dtype=np.float32)
        RA = np.zeros(default_length, dtype=np.float32)
        DEC = np.zeros(default_length, dtype=np.float32)
        SNR = np.zeros(default_length, dtype=np.float32)
        individual_flag = np.zeros(default_length, dtype=np.float32)
        Kmag = np.zeros(default_length, dtype=np.float32)
        AK_TARG = np.zeros(default_length, dtype=np.float32)

        # Data array
        teff = np.zeros(default_length, dtype=np.float32)
        logg = np.zeros(default_length, dtype=np.float32)
        MH = np.zeros(default_length, dtype=np.float32)
        alpha_M = np.zeros(default_length, dtype=np.float32)
        C = np.zeros(default_length, dtype=np.float32)
        C1 = np.zeros(default_length, dtype=np.float32)
        N = np.zeros(default_length, dtype=np.float32)
        O = np.zeros(default_length, dtype=np.float32)
        Na = np.zeros(default_length, dtype=np.float32)
        Mg = np.zeros(default_length, dtype=np.float32)
        Al = np.zeros(default_length, dtype=np.float32)
        Si = np.zeros(default_length, dtype=np.float32)
        P = np.zeros(default_length, dtype=np.float32)
        S = np.zeros(default_length, dtype=np.float32)
        K = np.zeros(default_length, dtype=np.float32)
        Ca = np.zeros(default_length, dtype=np.float32)
        Ti = np.zeros(default_length, dtype=np.float32)
        Ti2 = np.zeros(default_length, dtype=np.float32)
        V = np.zeros(default_length, dtype=np.float32)
        Cr = np.zeros(default_length, dtype=np.float32)
        Mn = np.zeros(default_length, dtype=np.float32)
        Fe = np.zeros(default_length, dtype=np.float32)
        Co = np.zeros(default_length, dtype=np.float32)
        Ni = np.zeros(default_length, dtype=np.float32)
        Cu = np.zeros(default_length, dtype=np.float32)
        Ge = np.zeros(default_length, dtype=np.float32)
        Ce = np.zeros(default_length, dtype=np.float32)
        Rb = np.zeros(default_length, dtype=np.float32)
        Y = np.zeros(default_length, dtype=np.float32)
        Nd = np.zeros(default_length, dtype=np.float32)
        parallax = np.zeros(default_length, dtype=np.float32)
        fakemag = np.zeros(default_length, dtype=np.float32)

        # Error array
        teff_err = np.zeros(default_length, dtype=np.float32)
        logg_err = np.zeros(default_length, dtype=np.float32)
        MH_err = np.zeros(default_length, dtype=np.float32)
        alpha_M_err = np.zeros(default_length, dtype=np.float32)
        C_err = np.zeros(default_length, dtype=np.float32)
        C1_err = np.zeros(default_length, dtype=np.float32)
        N_err = np.zeros(default_length, dtype=np.float32)
        O_err = np.zeros(default_length, dtype=np.float32)
        Na_err = np.zeros(default_length, dtype=np.float32)
        Mg_err = np.zeros(default_length, dtype=np.float32)
        Al_err = np.zeros(default_length, dtype=np.float32)
        Si_err = np.zeros(default_length, dtype=np.float32)
        P_err = np.zeros(default_length, dtype=np.float32)
        S_err = np.zeros(default_length, dtype=np.float32)
        K_err = np.zeros(default_length, dtype=np.float32)
        Ca_err = np.zeros(default_length, dtype=np.float32)
        Ti_err = np.zeros(default_length, dtype=np.float32)
        Ti2_err = np.zeros(default_length, dtype=np.float32)
        V_err = np.zeros(default_length, dtype=np.float32)
        Cr_err = np.zeros(default_length, dtype=np.float32)
        Mn_err = np.zeros(default_length, dtype=np.float32)
        Fe_err = np.zeros(default_length, dtype=np.float32)
        Co_err = np.zeros(default_length, dtype=np.float32)
        Ni_err = np.zeros(default_length, dtype=np.float32)
        Cu_err = np.zeros(default_length, dtype=np.float32)
        Ge_err = np.zeros(default_length, dtype=np.float32)
        Ce_err = np.zeros(default_length, dtype=np.float32)
        Rb_err = np.zeros(default_length, dtype=np.float32)
        Y_err = np.zeros(default_length, dtype=np.float32)
        Nd_err = np.zeros(default_length, dtype=np.float32)
        parallax_err = np.zeros(default_length, dtype=np.float32)
        fakemag_err = np.zeros(default_length, dtype=np.float32)

        array_counter = 0

        start_time = time.time()

        # load the continuum mask once so it is not re-read on every iteration
        if self.cont_mask is None:
            maskpath = os.path.join(os.path.dirname(astroNN.__path__[0]),
                                    'astroNN', 'data',
                                    f'dr{self.apogee_dr}_contmask.npy')
            self.cont_mask = np.load(maskpath)

        for counter, index in enumerate(indices):
            nvisits = 1
            apogee_id = hdulist[1].data['APOGEE_ID'][index]
            location_id = hdulist[1].data['LOCATION_ID'][index]
            if counter % 100 == 0:
                print(
                    f'Completed {counter + 1} of {indices.shape[0]}, {time.time() - start_time:.2f}s elapsed'
                )
            if self.continuum is False:
                path = combined_spectra(dr=self.apogee_dr,
                                        location=location_id,
                                        apogee=apogee_id,
                                        verbose=0)
                if path is False:
                    # skip this star if no combined spectrum file is found
                    continue
                combined_file = fits.open(path)
                _spec = combined_file[
                    1].data  # Pseudo-continuum normalized flux
                _spec_err = combined_file[2].data  # Spectrum error array
                _spec = gap_delete(
                    _spec, dr=self.apogee_dr)  # Delete the gap between sensors
                _spec_err = gap_delete(_spec_err, dr=self.apogee_dr)
                inSNR = combined_file[0].header['SNR']
                combined_file.close()
            else:
                path = visit_spectra(dr=self.apogee_dr,
                                     location=location_id,
                                     apogee=apogee_id,
                                     verbose=0)
                if path is False:
                    # skip this star if no visit spectrum file is found
                    continue
                apstar_file = fits.open(path)
                nvisits = apstar_file[0].header['NVISITS']
                if nvisits == 1:
                    _spec = apstar_file[1].data
                    _spec_err = apstar_file[2].data
                    _spec_mask = apstar_file[3].data
                    inSNR = np.ones(nvisits)
                    inSNR[0] = apstar_file[0].header['SNR']
                else:
                    _spec = apstar_file[1].data[1:]
                    _spec_err = apstar_file[2].data[1:]
                    _spec_mask = apstar_file[3].data[1:]
                    inSNR = np.ones(nvisits + 1)
                    inSNR[0] = apstar_file[0].header['SNR']
                    for i in range(nvisits):
                        inSNR[i + 1] = apstar_file[0].header[f'SNRVIS{i + 1}']

                    # Remove visit spectra whose flux is all zeros
                    ii = 0
                    while ii < _spec.shape[0]:
                        if np.count_nonzero(_spec[ii]) == 0:
                            nvisits -= 1
                            _spec = np.delete(_spec, ii, 0)
                            _spec_err = np.delete(_spec_err, ii, 0)
                            _spec_mask = np.delete(_spec_mask, ii, 0)
                            inSNR = np.delete(inSNR, ii, 0)
                            ii -= 1
                        ii += 1

                    # add one row for the combined spectrum so the slicing
                    # below stays consistent; the true visit count is unchanged
                    nvisits += 1

                # Normalize the spectra and zero out pixels flagged in the bitmask
                _spec, _spec_err = self.apstar_normalization(
                    _spec, _spec_err, _spec_mask)
                apstar_file.close()

            if nvisits == 1:
                individual_flag[array_counter:array_counter + nvisits] = 0
            else:
                individual_flag[array_counter:array_counter + 1] = 0
                individual_flag[array_counter + 1:array_counter + nvisits] = 1
            spec[array_counter:array_counter + nvisits, :] = _spec
            spec_err[array_counter:array_counter + nvisits, :] = _spec_err
            SNR[array_counter:array_counter + nvisits] = inSNR
            RA[array_counter:array_counter + nvisits] = np.tile(
                hdulist[1].data['RA'][index], nvisits)
            DEC[array_counter:array_counter + nvisits] = np.tile(
                hdulist[1].data['DEC'][index], nvisits)
            parallax[array_counter:array_counter + nvisits] = np.tile(
                -9999, nvisits)
            parallax_err[array_counter:array_counter + nvisits] = np.tile(
                -9999, nvisits)
            fakemag[array_counter:array_counter + nvisits] = np.tile(
                -9999, nvisits)
            fakemag_err[array_counter:array_counter + nvisits] = np.tile(
                -9999, nvisits)
            Kmag[array_counter:array_counter + nvisits] = np.tile(
                hdulist[1].data['K'][index], nvisits)
            AK_TARG[array_counter:array_counter + nvisits] = np.tile(
                hdulist[1].data['AK_TARG'][index], nvisits)

            if self.spectra_only is not True:
                teff[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['PARAM'][index, 0], nvisits)
                logg[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['PARAM'][index, 1], nvisits)
                MH[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['PARAM'][index, 3], nvisits)
                alpha_M[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['PARAM'][index, 6], nvisits)
                C[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 0], nvisits)
                C1[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 1], nvisits)
                N[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 2], nvisits)
                O[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 3], nvisits)
                Na[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 4], nvisits)
                Mg[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 5], nvisits)
                Al[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 6], nvisits)
                Si[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 7], nvisits)
                P[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 8], nvisits)
                S[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 9], nvisits)
                K[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 10], nvisits)
                Ca[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 11], nvisits)
                Ti[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 12], nvisits)
                Ti2[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 13], nvisits)
                V[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 14], nvisits)
                Cr[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 15], nvisits)
                Mn[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 16], nvisits)
                Fe[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 17], nvisits)
                Co[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 18], nvisits)
                Ni[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 19], nvisits)
                Cu[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 20], nvisits)
                Ge[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 21], nvisits)
                Ce[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 22], nvisits)
                Rb[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 23], nvisits)
                Y[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 24], nvisits)
                Nd[array_counter:array_counter + nvisits] = np.tile(
                    hdulist[1].data['X_H'][index, 25], nvisits)

                if self.use_err is True:
                    teff_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['TEFF_ERR'][index], nvisits)
                    logg_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['LOGG_ERR'][index], nvisits)
                    MH_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['M_H_ERR'][index], nvisits)
                    alpha_M_err[array_counter:array_counter +
                                nvisits] = np.tile(
                                    hdulist[1].data['ALPHA_M_ERR'][index],
                                    nvisits)
                    C_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 0], nvisits)
                    C1_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 1], nvisits)
                    N_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 2], nvisits)
                    O_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 3], nvisits)
                    Na_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 4], nvisits)
                    Mg_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 5], nvisits)
                    Al_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 6], nvisits)
                    Si_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 7], nvisits)
                    P_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 8], nvisits)
                    S_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 9], nvisits)
                    K_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 10], nvisits)
                    Ca_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 11], nvisits)
                    Ti_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 12], nvisits)
                    Ti2_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 13], nvisits)
                    V_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 14], nvisits)
                    Cr_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 15], nvisits)
                    Mn_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 16], nvisits)
                    Fe_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 17], nvisits)
                    Co_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 18], nvisits)
                    Ni_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 19], nvisits)
                    Cu_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 20], nvisits)
                    Ge_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 21], nvisits)
                    Ce_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 22], nvisits)
                    Rb_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 23], nvisits)
                    Y_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 24], nvisits)
                    Nd_err[array_counter:array_counter + nvisits] = np.tile(
                        hdulist[1].data['X_H_ERR'][index, 25], nvisits)
            array_counter += nvisits

        spec = spec[0:array_counter]
        spec_err = spec_err[0:array_counter]
        individual_flag = individual_flag[0:array_counter]
        RA = RA[0:array_counter]
        DEC = DEC[0:array_counter]
        SNR = SNR[0:array_counter]

        if self.spectra_only is not True:
            teff = teff[0:array_counter]
            logg = logg[0:array_counter]
            Kmag = Kmag[0:array_counter]
            AK_TARG = AK_TARG[0:array_counter]
            MH = MH[0:array_counter]
            alpha_M = alpha_M[0:array_counter]
            C = C[0:array_counter]
            C1 = C1[0:array_counter]
            N = N[0:array_counter]
            O = O[0:array_counter]
            Na = Na[0:array_counter]
            Mg = Mg[0:array_counter]
            Al = Al[0:array_counter]
            Si = Si[0:array_counter]
            P = P[0:array_counter]
            S = S[0:array_counter]
            K = K[0:array_counter]
            Ca = Ca[0:array_counter]
            Ti = Ti[0:array_counter]
            Ti2 = Ti2[0:array_counter]
            V = V[0:array_counter]
            Cr = Cr[0:array_counter]
            Mn = Mn[0:array_counter]
            Fe = Fe[0:array_counter]
            Co = Co[0:array_counter]
            Ni = Ni[0:array_counter]
            Cu = Cu[0:array_counter]
            Ge = Ge[0:array_counter]
            Ce = Ce[0:array_counter]
            Rb = Rb[0:array_counter]
            Y = Y[0:array_counter]
            Nd = Nd[0:array_counter]
            parallax = parallax[0:array_counter]
            fakemag = fakemag[0:array_counter]

            teff_err = teff_err[0:array_counter]
            logg_err = logg_err[0:array_counter]
            MH_err = MH_err[0:array_counter]
            alpha_M_err = alpha_M_err[0:array_counter]
            C_err = C_err[0:array_counter]
            C1_err = C1_err[0:array_counter]
            N_err = N_err[0:array_counter]
            O_err = O_err[0:array_counter]
            Na_err = Na_err[0:array_counter]
            Mg_err = Mg_err[0:array_counter]
            Al_err = Al_err[0:array_counter]
            Si_err = Si_err[0:array_counter]
            P_err = P_err[0:array_counter]
            S_err = S_err[0:array_counter]
            K_err = K_err[0:array_counter]
            Ca_err = Ca_err[0:array_counter]
            Ti_err = Ti_err[0:array_counter]
            Ti2_err = Ti2_err[0:array_counter]
            V_err = V_err[0:array_counter]
            Cr_err = Cr_err[0:array_counter]
            Mn_err = Mn_err[0:array_counter]
            Fe_err = Fe_err[0:array_counter]
            Co_err = Co_err[0:array_counter]
            Ni_err = Ni_err[0:array_counter]
            Cu_err = Cu_err[0:array_counter]
            Ge_err = Ge_err[0:array_counter]
            Ce_err = Ce_err[0:array_counter]
            Rb_err = Rb_err[0:array_counter]
            Y_err = Y_err[0:array_counter]
            Nd_err = Nd_err[0:array_counter]
            parallax_err = parallax_err[0:array_counter]
            fakemag_err = fakemag_err[0:array_counter]

            if self.use_esa_gaia is True:
                gaia_ra, gaia_dec, gaia_parallax, gaia_err = gaiadr2_parallax(
                    cuts=True, keepdims=False)
                m1, m2, sep = xmatch(RA,
                                     gaia_ra,
                                     maxdist=2,
                                     colRA1=RA,
                                     colDec1=DEC,
                                     colRA2=gaia_ra,
                                     colDec2=gaia_dec,
                                     swap=False)
                parallax[m1] = gaia_parallax[m2]
                parallax_err[m1] = gaia_err[m2]
                fakemag[m1], fakemag_err[m1] = mag_to_fakemag(
                    extinction_correction(Kmag[m1], AK_TARG[m1]), parallax[m1],
                    parallax_err[m1])
            elif self.use_anderson_2017 is True:
                gaia_ra, gaia_dec, gaia_parallax, gaia_err = anderson_2017_parallax(
                )
                m1, m2, sep = xmatch(RA,
                                     gaia_ra,
                                     maxdist=2,
                                     colRA1=RA,
                                     colDec1=DEC,
                                     epoch1=2000.,
                                     colRA2=gaia_ra,
                                     colDec2=gaia_dec,
                                     epoch2=2000.,
                                     swap=False)
                parallax[m1] = gaia_parallax[m2]
                parallax_err[m1] = gaia_err[m2]
                fakemag[m1], fakemag_err[m1] = mag_to_fakemag(
                    extinction_correction(Kmag[m1], AK_TARG[m1]), parallax[m1],
                    parallax_err[m1])
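            # note: astroNN defines fakemag as parallax [mas] * 10**(0.2 * m),
            # with m the extinction-corrected apparent magnitude, which is why
            # mag_to_fakemag is fed the corrected Kmag together with the
            # parallax and its uncertainty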

        print(f'Creating {self.filename}.h5')
        h5f = h5py.File(f'{self.filename}.h5', 'w')
        h5f.create_dataset('spectra', data=spec)
        h5f.create_dataset('spectra_err', data=spec_err)
        h5f.create_dataset('in_flag', data=individual_flag)
        h5f.create_dataset('index', data=indices)

        if self.spectra_only is not True:
            h5f.create_dataset('SNR', data=SNR)
            h5f.create_dataset('RA', data=RA)
            h5f.create_dataset('DEC', data=DEC)
            h5f.create_dataset('Kmag', data=Kmag)
            h5f.create_dataset('AK_TARG', data=AK_TARG)
            h5f.create_dataset('teff', data=teff)
            h5f.create_dataset('logg', data=logg)
            h5f.create_dataset('M', data=MH)
            h5f.create_dataset('alpha', data=alpha_M)
            h5f.create_dataset('C', data=C)
            h5f.create_dataset('C1', data=C1)
            h5f.create_dataset('N', data=N)
            h5f.create_dataset('O', data=O)
            h5f.create_dataset('Na', data=Na)
            h5f.create_dataset('Mg', data=Mg)
            h5f.create_dataset('Al', data=Al)
            h5f.create_dataset('Si', data=Si)
            h5f.create_dataset('P', data=P)
            h5f.create_dataset('S', data=S)
            h5f.create_dataset('K', data=K)
            h5f.create_dataset('Ca', data=Ca)
            h5f.create_dataset('Ti', data=Ti)
            h5f.create_dataset('Ti2', data=Ti2)
            h5f.create_dataset('V', data=V)
            h5f.create_dataset('Cr', data=Cr)
            h5f.create_dataset('Mn', data=Mn)
            h5f.create_dataset('Fe', data=Fe)
            h5f.create_dataset('Co', data=Co)
            h5f.create_dataset('Ni', data=Ni)
            h5f.create_dataset('Cu', data=Cu)
            h5f.create_dataset('Ge', data=Ge)
            h5f.create_dataset('Ce', data=Ce)
            h5f.create_dataset('Rb', data=Rb)
            h5f.create_dataset('Y', data=Y)
            h5f.create_dataset('Nd', data=Nd)
            h5f.create_dataset('parallax', data=parallax)
            h5f.create_dataset('fakemag', data=fakemag)

            if self.use_err is True:
                h5f.create_dataset('AK_TARG_err', data=np.zeros_like(AK_TARG))
                h5f.create_dataset('teff_err', data=teff_err)
                h5f.create_dataset('logg_err', data=logg_err)
                h5f.create_dataset('M_err', data=MH_err)
                h5f.create_dataset('alpha_err', data=alpha_M_err)
                h5f.create_dataset('C_err', data=C_err)
                h5f.create_dataset('C1_err', data=C1_err)
                h5f.create_dataset('N_err', data=N_err)
                h5f.create_dataset('O_err', data=O_err)
                h5f.create_dataset('Na_err', data=Na_err)
                h5f.create_dataset('Mg_err', data=Mg_err)
                h5f.create_dataset('Al_err', data=Al_err)
                h5f.create_dataset('Si_err', data=Si_err)
                h5f.create_dataset('P_err', data=P_err)
                h5f.create_dataset('S_err', data=S_err)
                h5f.create_dataset('K_err', data=K_err)
                h5f.create_dataset('Ca_err', data=Ca_err)
                h5f.create_dataset('Ti_err', data=Ti_err)
                h5f.create_dataset('Ti2_err', data=Ti2_err)
                h5f.create_dataset('V_err', data=V_err)
                h5f.create_dataset('Cr_err', data=Cr_err)
                h5f.create_dataset('Mn_err', data=Mn_err)
                h5f.create_dataset('Fe_err', data=Fe_err)
                h5f.create_dataset('Co_err', data=Co_err)
                h5f.create_dataset('Ni_err', data=Ni_err)
                h5f.create_dataset('Cu_err', data=Cu_err)
                h5f.create_dataset('Ge_err', data=Ge_err)
                h5f.create_dataset('Ce_err', data=Ce_err)
                h5f.create_dataset('Rb_err', data=Rb_err)
                h5f.create_dataset('Y_err', data=Y_err)
                h5f.create_dataset('Nd_err', data=Nd_err)
                h5f.create_dataset('parallax_err', data=parallax_err)
                h5f.create_dataset('fakemag_err', data=fakemag_err)

        h5f.close()
        print(f'Successfully created {self.filename}.h5 in {currentdir}')
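
# A minimal read-back sketch for the HDF5 file written above (not part of
# the original class; 'apogee_train.h5' is a placeholder for whatever name
# was passed as self.filename):
import h5py

with h5py.File('apogee_train.h5', 'r') as f:
    spectra = f['spectra'][()]          # (n_rows, n_pixels) float32 fluxes
    spectra_err = f['spectra_err'][()]  # matching per-pixel uncertainties
    in_flag = f['in_flag'][()]          # 0 = combined spectrum, 1 = extra visit
    if 'teff' in f:                     # labels exist unless spectra_only was set
        teff = f['teff'][()]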
Exemplo n.º 37
0
    def _generator(self,
                   return_type,
                   save_with_name="",
                   save_location='',
                   clobber=False,
                   overwrite=False,
                   do_not_scale_image_data=True,
                   return_fname=False,
                   ccd_kwargs=None,
                   **kwd):
        """
        Generator that yields each {name} in the collection.

        If any of the parameters ``save_with_name``, ``save_location`` or
        ``overwrite`` evaluates to ``True`` the generator will write a copy of
        each FITS file it is iterating over. In other words, if
        ``save_with_name`` and/or ``save_location`` is a string with non-zero
        length, and/or ``overwrite`` is ``True``, a copy of each FITS file will
        be made.

        Parameters
        ----------
        save_with_name : str, optional
            string added to end of file name (before extension) if
            FITS file should be saved after iteration. Unless
            ``save_location`` is set, files will be saved to location of
            the source files ``self.location``.
            Default is ``''``.

        save_location : str, optional
            Directory in which to save FITS files; implies that FITS
            files will be saved. Note this provides an easy way to
            copy a directory of files--loop over the {name} with
            ``save_location`` set.
            Default is ``''``.

        overwrite : bool, optional
            If ``True``, overwrite input FITS files.
            Default is ``False``.

        clobber : bool, optional
            Alias for ``overwrite``.
            Default is ``False``.

        do_not_scale_image_data : bool, optional
            If ``True``, prevents fits from scaling images. Default is
            ``{default_scaling}``.

        return_fname : bool, optional
            If True, return the tuple (header, file_name) instead of just
            header. The file name returned is the name of the file only,
            not the full path to the file.
            Default is ``False``.

        ccd_kwargs : dict, optional
            Dict with parameters for `~astropy.nddata.fits_ccddata_reader`.
            For instance, the key ``'unit'`` can be used to specify the unit
            of the data. If ``'unit'`` is not given then ``'adu'`` is used as
            the default unit.
            See `~astropy.nddata.fits_ccddata_reader` for a complete list of
            parameters that can be passed through ``ccd_kwargs``.

        **kwd :
            Any additional keywords are used to filter the items returned; see
            `files_filtered` examples for details.

        Returns
        -------
        `{return_type}`
            If ``return_fname`` is ``False``, yield the next {name} in the
            collection.

        (`{return_type}`, str)
            If ``return_fname`` is ``True``, yield a tuple of
            ({name}, ``file name``) for the next item in the collection.
        """
        # store mask so we can reset at end--must COPY, otherwise
        # current_mask just points to the mask of summary
        if not self.summary:
            return

        current_mask = {}
        for col in self.summary.columns:
            current_mask[col] = self.summary[col].mask

        if kwd:
            self._find_keywords_by_values(**kwd)

        ccd_kwargs = ccd_kwargs or {}

        for full_path in self._paths():
            add_kwargs = {'do_not_scale_image_data': do_not_scale_image_data}

            # We need to open the file here, get the appropriate values and then
            # close it again before it "yields"; otherwise it's not guaranteed
            # that the generator actually advances and closes the file again.
            # For example if one uses "next" on the generator manually the
            # file handle could "leak".
            if return_type == 'header':
                return_thing = fits.getheader(full_path, self.ext)
            elif return_type == 'data':
                return_thing = fits.getdata(full_path, self.ext, **add_kwargs)
            elif return_type == 'ccd':
                return_thing = fits_ccddata_reader(full_path,
                                                   hdu=self.ext,
                                                   **ccd_kwargs)
            elif return_type == 'hdu':
                with fits.open(full_path, **add_kwargs) as hdulist:
                    ext_index = hdulist.index_of(self.ext)
                    # Need to copy the HDU to prevent lazy loading problems
                    # and "IO operations on closed file" errors
                    return_thing = hdulist[ext_index].copy()
            else:
                raise ValueError('no generator for {}'.format(return_type))

            file_name = path.basename(full_path)
            if return_fname:
                yield return_thing, file_name
            else:
                yield return_thing

            if save_location:
                destination_dir = save_location
            else:
                destination_dir = path.dirname(full_path)
            basename = path.basename(full_path)
            if save_with_name:
                base, ext = path.splitext(basename)
                basename = base + save_with_name + ext

            new_path = path.join(destination_dir, basename)

            # I really should have called the option overwrite from
            # the beginning. The hack below ensures old code works,
            # at least...
            if clobber or overwrite:
                if _ASTROPY_LT_1_3:
                    nuke_existing = {'clobber': True}
                else:
                    nuke_existing = {'overwrite': True}
            else:
                nuke_existing = {}

            if return_type == 'ccd':
                pass
            elif (new_path != full_path) or nuke_existing:
                with fits.open(full_path, **add_kwargs) as hdulist:
                    ext_index = hdulist.index_of(self.ext)
                    if return_type == 'hdu':
                        hdulist[ext_index] = return_thing
                    elif return_type == 'data':
                        hdulist[ext_index].data = return_thing
                    elif return_type == 'header':
                        hdulist[ext_index].header = return_thing

                    try:
                        hdulist.writeto(new_path, **nuke_existing)
                    except IOError:
                        logger.error('error writing file %s', new_path)
                        raise

        # reset mask
        for col in self.summary.columns:
            self.summary[col].mask = current_mask[col]
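
# A hedged usage sketch for the generator above: in ccdproc it backs the
# public hdus()/headers()/data()/ccds() iterators of ImageFileCollection.
# 'demo_dir' and the IMAGETYP filter value are illustrative placeholders.
from ccdproc import ImageFileCollection

ic = ImageFileCollection('demo_dir', keywords='*')
for header, fname in ic.headers(return_fname=True, imagetyp='LIGHT'):
    print(fname, header.get('EXPTIME'))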
Exemplo n.º 38
0
def sdss_score(flist, **kwargs):
    """Score a list of imaging fields from zero to one.
    """
    import time
    import numpy as np
    from astropy import log
    from astropy.io import fits
    from warnings import warn
    from ..sdssio import sdss_name, sdss_calib
    from ...pydlutils.sdss import sdss_flagval
    if 'silent' not in kwargs:
        kwargs['silent'] = True
    lat = 32.780361
    lng = 360.0 - 105.820417
    tzone = 7
    scores = 1
    #
    # Read the PHOTO status bits
    #
    if not kwargs['silent']:
        log.info('Setting PHOTO status bits')
    t1 = time.time()
    nlist = flist[1].header.get('NAXIS2')
    fdata = flist[1].data
    for k in range(nlist):
        if not kwargs['silent'] and ((k % 1000) == 0):
            log.info("Setting PHOTO status {0:d} {1:d}".format(k, nlist))
        thisfile = sdss_name('fpFieldStat', fdata.field('RUN')[k],
                             fdata.field('CAMCOL')[k], fdata.field('FIELD')[k],
                             fdata.field('RERUN')[k])
        try:
            fpfield = fits.open(thisfile)
        except IOError:
            warn("Retrying fpFieldStat file for RUN = {0:d} CAMCOL = {1:d} FIELD = {2:d} RERUN = {3}".format(fdata.field('RUN')[k],
                 fdata.field('CAMCOL')[k], fdata.field('FIELD')[k],
                 fdata.field('RERUN')[k]))
            try:
                fpfield = fits.open(thisfile)
            except IOError:
                warn("Bad fpFieldStat file for RUN = {0:d} CAMCOL = {1:d} FIELD = {2:d} RERUN = {3}".format(fdata.field('RUN')[k],
                     fdata.field('CAMCOL')[k], fdata.field('FIELD')[k],
                     fdata.field('RERUN')[k]))
                fdata.field('PHOTO_STATUS')[k] = -1
                if not kwargs['silent']:
                    log.info('Trying tsField instead.')
                thisfile = sdss_name('tsField', fdata.field('RUN')[k],
                                     fdata.field('CAMCOL')[k],
                                     fdata.field('FIELD')[k],
                                     fdata.field('RERUN')[k])
                try:
                    tsfield = fits.open(thisfile)
                except IOError:
                    warn('Bad tsField file.')
                else:
                    if not kwargs['silent']:
                        log.info('tsField found, using frames_status.')
                    fdata.field('PHOTO_STATUS')[k] = tsfield[1].data.field('frames_status')[0]
            else:
                fdata.field('PHOTO_STATUS')[k] = fpfield[1].data.field('status')[0]
        else:
            fdata.field('PHOTO_STATUS')[k] = fpfield[1].data.field('status')[0]
    if not kwargs['silent']:
        log.info("Time to set PHOTO status = {0:f} sec".format(time.time()-t1))
    #
    # Read in the PSP status
    #
    if not kwargs['silent']:
        log.info('Setting PSP status bits')
    t2 = time.time()
    for k in range(nlist):
        if not kwargs['silent'] and ((k % 1000) == 0):
            log.info("Setting PSP status {0:d} {1:d}".format(k, nlist))
        thisfile = sdss_name('psField', fdata.field('RUN')[k],
                             fdata.field('CAMCOL')[k], fdata.field('FIELD')[k],
                             fdata.field('RERUN')[k])
        psfield = None
        try:
            psfield = fits.open(thisfile)
        except IOError:
            warn("Retrying psField file for RUN = {0:d} CAMCOL = {1:d} FIELD = {2:d} RERUN = {3}".format(fdata.field('RUN')[k],
                 fdata.field('CAMCOL')[k], fdata.field('FIELD')[k],
                 fdata.field('RERUN')[k]))
            try:
                psfield = fits.open(thisfile)
            except IOError:
                warn("Bad psField file for RUN = {0:d} CAMCOL = {1:d} FIELD = {2:d} RERUN = {3}".format(fdata.field('RUN')[k],
                     fdata.field('CAMCOL')[k], fdata.field('FIELD')[k],
                     fdata.field('RERUN')[k]))
                fdata.field('PSP_STATUS')[k] = -1
                fdata.field('PSF_FWHM')[k] = -1
                fdata.field('SKYFLUX')[k] = -1
        pixscale = 0.396 * np.sqrt(fdata.field('XBIN')[k]**2 +
                                   fdata.field('YBIN')[k]**2)/np.sqrt(2.0)
        calibinfo = sdss_calib(fdata.field('RUN')[k],
                               fdata.field('CAMCOL')[k],
                               fdata.field('FIELD')[k],
                               fdata.field('RERUN')[k], **kwargs)
        # only overwrite the sentinel -1 values if a psField file was opened
        if psfield is not None:
            fdata.field('PSP_STATUS')[k] = psfield[6].data.field('status')[0]
            fdata.field('PSF_FWHM')[k] = psfield[6].data.field('psf_width')[0]
            fdata.field('SKYFLUX')[k] = (psfield[6].data.field('sky')[0] *
                                         calibinfo['NMGYPERCOUNT']/pixscale**2)
            psfield.close()
    if not kwargs['silent']:
        log.info("Time to set PSP status = {0:f} sec".format(time.time()-t2))
    #
    # Decide if each field exists in all 5 bands.
    #
    bad_bits = sdss_flagval('image_status', ['bad_rotator', 'bad_astrom',
                            'bad_focus', 'shutters'])
    if 'ignoreframesstatus' in kwargs:
        ignoreframesstatus = np.ones(fdata.field('PHOTO_STATUS').shape,
                                     dtype=bool)
    else:
        ignoreframesstatus = np.zeros(fdata.field('PHOTO_STATUS').shape,
                                      dtype=bool)
    qexist = (fdata.field('PHOTO_STATUS') == 0) | ignoreframesstatus
    for k in range(5):
        qexist &= (fdata.field('IMAGE_STATUS')[:, k] & bad_bits) == 0
    #
    # Decide if each field is photometric in all 5 bands.
    #
    unphot_bits = sdss_flagval('image_status', ['cloudy', 'unknown',
                               'ff_petals', 'dead_ccd', 'noisy_ccd'])
    qphot = fdata.field('SUN_ANGLE') < -12
    for k in range(5):
        qphot &= (fdata.field('IMAGE_STATUS')[:, k] & unphot_bits) == 0
    for k in range(5):
        qphot &= (((fdata.field('PSP_STATUS')[:, k] & 31) <= 2) |
                  (fdata.field('XBIN') > 1) | ignoreframesstatus)
    #
    # Now set the score for each field
    #
    sensitivity = (0.7 / (fdata.field('PSF_FWHM')[:, 2] *
                   np.sqrt(fdata.field('SKYFLUX')[:, 2]))) < 0.4
    fdata.field('SCORE')[:] = qexist * (0.1 + 0.5*qphot + sensitivity)
    ibinned = np.where(fdata.field('XBIN') > 1)[0]
    if len(ibinned) > 0:
        fdata.field('SCORE')[ibinned] *= 0.1
    #
    # Look for any NaN values, which could happen for example if there
    # is a corrupted psField file and PSF_FWHM or SKYFLUX was negative.
    #
    ibad = np.where(~np.isfinite(fdata.field('SCORE')))[0]
    if len(ibad) > 0:
        warn("Changing NaN scores for {0:d} fields to zero.".format(len(ibad)))
        fdata.field('SCORE')[ibad] = 0
    return fdata.field('SCORE')
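
# A hedged usage sketch (the file name is a placeholder): sdss_score expects
# an already-opened window_flist-style HDUList and fills its SCORE column
# in place before returning it.
from astropy.io import fits

flist = fits.open('window_flist.fits')
score = sdss_score(flist, silent=False)
flist.close()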
Exemplo n.º 39
0
#!/usr/bin/env python
# -*- coding: utf8 -*-
# import ds9   # the ds9 module does not import reliably, so it stays disabled
import numpy as np
from astropy.io import fits
import pyregion
import pyregion._region_filter as filter

# define some SPIRE-typical constants
beam = [465, 822, 1769]						# beam area, in arcsec^2
ebar = [1.0432705,1.0521442,1.1025926] 		# effective beam area ratio (beta=2 SPIRE UM, table 5.8, page 96; see also M. Smith email, 16/04/14)
cnoise = [0.0058,0.0063,0.0068]				# confusion noise, in Jy/beam, as from Nguyen et al. 2010
kk = 2.*np.pi/(360.*3600.)					# 1 arcsec in rad
img = input('Name of the SPIRE data: ')
reg_name = input('Name of the DS9 file containing the regions for flux and sky measurements: ')
hdulist = fits.open(img)
wavel = int(input('Wavelength of the image: '))
if wavel == 250:
	wind = 0
elif wavel == 350:
	wind = 1
elif wavel == 500:
	wind = 2
header = hdulist[1].header
fluxmap = hdulist[1].data
errmap = hdulist[2].data
nx = header["NAXIS1"]
ny = header["NAXIS2"]
size = (ny, nx)		#	this is the size of the image
pixsz = np.abs(header["CDELT1"])*3600.
## converts from MJy/sr to Jy/pix
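# A hedged completion of the conversion announced above (the original snippet
# truncates here): 1 MJy/sr is 1e6 Jy/sr, and one pixel subtends
# (pixsz*kk)**2 sr, so scaling by 1e6*(pixsz*kk)**2 puts both maps in Jy/pixel.
conv = 1.0e6 * (pixsz * kk)**2
fluxmap = fluxmap * conv
errmap = errmap * conv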
Exemplo n.º 40
0
def create_product_page(prodname, zoom_size=128, wcsname="",
                        gcolor='magenta', fsize=8):
    """Create a matplotlib Figure() object which summarizes this product FITS file."""

    # obtain image data to display
    with fits.open(prodname) as prod:
        data = prod[1].data
        phdr = prod[0].header
        if 'texptime' not in phdr:
            return None, None
        targname = phdr['targname']
        inst = phdr['instrume']
        det = phdr['detector']
        texptime = phdr['texptime']
        inexp = phdr['d001data'].split('[')[0]
        wcstype = prod[1].header['wcstype']
        wcs = wcsutil.HSTWCS(prod, ext=1)
        hdrtab = prod['hdrtab'].data
        filters = ';'.join([phdr[f] for f in phdr['filter*']])
        dateobs = phdr['date-obs']  # human-readable date
        expstart = phdr['expstart']  # MJD float value
        asnid = phdr.get('asn_id', '')

    center = (data.shape[0] // 2, data.shape[1] // 2)
    prod_path = os.path.split(prodname)[0]

    data = np.nan_to_num(data, nan=0.0)

    # Get GAIA catalog
    refcat = amutils.create_astrometric_catalog(prodname, existing_wcs=wcs,
                                                output=None)
    refx, refy = wcs.all_world2pix(refcat['RA'], refcat['DEC'], 0)
    # Remove points outside the full-size image area
    rx = []
    ry = []
    zx = []
    zy = []
    for x, y in zip(refx, refy):
        if 0 < x < data.shape[1] and 0 < y < data.shape[0]:
            rx.append(x)
            ry.append(y)
        if -zoom_size < x - center[1] < zoom_size and -zoom_size < y - center[0] < zoom_size:
            zx.append(x - center[1] + zoom_size)
            zy.append(y - center[0] + zoom_size)

    # Define subplot regions on page
    fig = plt.figure(constrained_layout=True, figsize=(8.5, 11))
    gs = fig.add_gridspec(ncols=4, nrows=5)

    # title plots
    rootname = os.path.basename(prodname)
    img_title = "{} image of {} with WCSNAME={}".format(rootname, targname, wcsname)
    fig.suptitle(img_title, ha='center', va='top', fontsize=fsize)

    # Define image display
    fig_img = fig.add_subplot(gs[:3, :])
    fig_zoom = fig.add_subplot(gs[3:, 2:])
    fig_summary = fig.add_subplot(gs[3:, :2])

    # Compute display range
    dmax = (data.max() // 10) if data.max() <= 1000. else 100
    dscaled = np.log10(np.clip(data, -0.1, dmax) + 0.10001)
    # identify zoom region around center of data
    zoom = dscaled[center[0] - zoom_size:center[0] + zoom_size,
                   center[1] - zoom_size:center[1] + zoom_size]

    # display full image
    fig_img.imshow(dscaled, cmap='gray', origin='lower')
    # display zoomed section
    fig_zoom.imshow(zoom, cmap='gray', origin='lower')

    # define markerstyle
    mstyle = markers.MarkerStyle(marker='o')
    mstyle.set_fillstyle('none')
    # plot GAIA sources onto full size image
    fig_img.scatter(rx, ry, marker=mstyle, alpha=0.35, c=gcolor, s=3)
    fig_zoom.scatter(zx, zy, marker=mstyle, alpha=0.35, c=gcolor)

    # Print summary info
    pname = os.path.split(prodname)[1]
    fig_summary.text(0.01, 0.95, "Summary for {}".format(pname), fontsize=fsize)
    fig_summary.text(0.01, 0.9, "WCSNAME: {}".format(wcsname), fontsize=fsize)
    fig_summary.text(0.01, 0.85, "TARGET: {}".format(targname), fontsize=fsize)
    fig_summary.text(0.01, 0.8, "Instrument: {}/{}".format(inst, det), fontsize=fsize)
    fig_summary.text(0.01, 0.75, "Filters: {}".format(filters), fontsize=fsize)
    fig_summary.text(0.01, 0.7, "Total Exptime: {}".format(texptime), fontsize=fsize)
    fig_summary.text(0.01, 0.65, "WCSTYPE: {}".format(wcstype), fontsize=fsize)
    fig_summary.text(0.01, 0.5, "Total # of GAIA sources: {}".format(len(refx)), fontsize=fsize)
    fig_summary.text(0.01, 0.45, "# of GAIA matches: {}".format(len(rx)), fontsize=fsize)

    # Get extended information about observation
    hdrtab_cols = hdrtab.columns.names
    mtflag = get_col_val(hdrtab, 'mtflag', default="")
    gyromode = get_col_val(hdrtab, 'gyromode', default='N/A')


    # populate JSON summary info
    summary = dict(wcsname=wcsname, targname=targname, asnid=asnid,
                    dateobs=dateobs, expstart=expstart,
                    instrument=(inst, det), exptime=texptime,
                    wcstype=wcstype, num_gaia=len(refx), filters=filters,
                    rms_ra=-1, rms_dec=-1, nmatch=-1, catalog="")
    obs_kws = ['gyromode', 'fgslock', 'aperture', 'mtflag', 'subarray',
                'obstype', 'obsmode', 'scan_typ', 'photmode']
    for kw in obs_kws:
        summary[kw] = get_col_val(hdrtab, kw, default="")


    if 'FIT' in wcsname:
        # Look for FIT RMS and other stats from headerlet
        exp = fits.open(os.path.join(prod_path, inexp))
        for ext in exp:
            if 'extname' in ext.header and ext.header['extname'] == 'HDRLET' \
                and ext.header['wcsname'] == wcsname:
                hdrlet = ext.headerlet
                rms_ra = hdrlet[0].header['rms_ra']
                rms_dec = hdrlet[0].header['rms_dec']
                nmatch = hdrlet[0].header['nmatch']
                catalog = hdrlet[0].header['catalog']

                fit_vals = dict(rms_ra=rms_ra, rms_dec=rms_dec, nmatch=nmatch, catalog=catalog)
                summary.update(fit_vals)
                break
        exp.close()
        try:
            fig_summary.text(0.01, 0.4, "RMS: RA={:0.3f}mas, DEC={:0.3f}mas".format(rms_ra, rms_dec), fontsize=fsize)
            fig_summary.text(0.01, 0.35, "# matches: {}".format(nmatch), fontsize=fsize)
            fig_summary.text(0.01, 0.3, "Matched to {} catalog".format(catalog), fontsize=fsize)
        except NameError:
            fig_summary.text(0.01, 0.35, "No MATCH to GAIA")
            print("Data without a match to GAIA: {},{}".format(inexp, wcsname))


    return fig, summary
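
# A hedged usage sketch (file names and WCSNAME are placeholders): build the
# summary page for one drizzled product, then save the figure and JSON info.
import json

fig, summary = create_product_page('sample_drz.fits', wcsname='FIT_REL_GAIA')
if fig is not None:
    fig.savefig('sample_summary.pdf')
    with open('sample_summary.json', 'w') as jf:
        json.dump(summary, jf, indent=2)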
Exemplo n.º 41
0
    if man_dec is not None:
        man_dec = float(man_dec)
    man_flipx = args.flipx
    man_flipy = args.flipy
    man_rotate = float(args.rotate)
    man_target = args.target
    keep_wcs = args.keep_wcs
    telescope = args.telescope
    filenames = args.images

    # read telescope information from fits headers
    instruments = []
    for filename in filenames:
        try:
            hdulist = fits.open(filename,
                                verify='ignore',
                                ignore_missing_end=True)
        except IOError:
            raise IOError('File %s does not exist! Abort.' % filename)

        header = hdulist[0].header
        for key in _pp_conf.instrument_keys:
            if key in header:
                instruments.append(header[key])
        hdulist.close()

    if telescope is None:
        try:
            telescope = _pp_conf.instrument_identifiers[instruments[0]]
        except (KeyError, IndexError):
            print('cannot identify telescope/instrument; '
Exemplo n.º 42
0
    def ingest(filename):
        # create path and filename formatter
        if hasattr(settings,
                   'PATH_FORMATTER') and settings.PATH_FORMATTER is not None:
            path_fmt = FilenameFormatter(settings.PATH_FORMATTER)
        else:
            raise ValueError('No path formatter configured.')
        filename_fmt = None
        if hasattr(settings, 'FILENAME_FORMATTER'
                   ) and settings.FILENAME_FORMATTER is not None:
            filename_fmt = FilenameFormatter(settings.FILENAME_FORMATTER)

        # get archive root
        root = settings.ARCHIVE_ROOT

        # open file
        log.info('Opening new file to ingest...')
        fits_file = fits.open(filename)

        # get path for archive
        path = path_fmt(fits_file['SCI'].header)

        # get filename for archive
        if isinstance(filename_fmt, FilenameFormatter):
            name = filename_fmt(fits_file['SCI'].header)
        else:
            tmp = os.path.basename(fits_file['SCI'].header['FNAME'])
            name = tmp[:tmp.find('.')] if '.' in tmp else tmp
        log.info('Formatted filename to %s.', name)

        # create new filename and set it in header
        out_filename = name + '.fits.fz'
        fits_file['SCI'].header['FNAME'] = name

        # find or create image
        if Frame.objects.filter(basename=name).exists():
            img = Frame.objects.get(basename=name)
        else:
            img = Frame(basename=name)

        # set headers
        img.path = path
        img.add_fits_header(fits_file['SCI'].header)

        # write to database
        log.info('Writing to database...')
        img.save()

        # link related
        img.link_related(fits_file['SCI'].header)

        # create path if necessary
        file_path = os.path.join(root, path)
        if not os.path.exists(file_path):
            os.makedirs(file_path)

        # write FITS file to byte stream and close
        with io.BytesIO() as bio:
            log.info('Writing file...')
            fits_file.writeto(bio)
            fits_file.close()

            # pipe data into fpack
            log.info('Fpacking file...')
            with open(os.path.join(file_path, out_filename), 'wb') as out_f:
                proc = subprocess.Popen(['/usr/bin/fpack', '-S', '-'],
                                        stdin=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        stdout=out_f)
                proc.communicate(bytes(bio.getbuffer()))

            # all good store it
            if proc.returncode == 0:
                log.info('Stored image as %s...', img.basename)
                return img.basename
            else:
                raise ValueError('Could not fpack file %s.' % filename)
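
# A hedged usage sketch (the path is a placeholder): on success, ingest()
# archives and fpack-compresses the file and returns the Frame basename.
new_basename = ingest('/incoming/new_image.fits')
log.info('Ingested frame %s.', new_basename)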
Exemplo n.º 43
0
import numpy as np
import math 
import matplotlib
from astropy.io import fits


#Import image source positions:
y, x = np.loadtxt("ssa22_XY.reg", skiprows=0, unpack=True)
segmap = '/Users/Brett/Desktop/Research/ssa22/Code/check.fits'
image = '/Users/Brett/Desktop/Research/ssa22/Code/Mosaic_North2_drz_sci.fits'

hdulist_seg = fits.open(segmap)
hdulist_img = fits.open(image)


seg_data = hdulist_seg[0].data
img_data = hdulist_img[0].data


n1 = len(x)


M=0

for i in range(n1):
	source = seg_data[int(x[i]), int(y[i])]
	if source != 0:
		here = np.where(seg_data == source)
		flux = img_data[here]
			
Exemplo n.º 44
0
def prepare(filenames,
            obsparam,
            header_update,
            keep_wcs=False,
            flipx=False,
            flipy=False,
            rotate=0,
            man_ra=None,
            man_dec=None,
            diagnostics=False,
            display=False):
    """
    prepare wrapper
    output: diagnostic properties
    """

    # start logging
    logging.info('preparing data with parameters: %s' %
                 (', '.join([('%s: %s' % (var, str(val)))
                             for var, val in list(locals().items())])))

    # change FITS file extensions to .fits
    for idx, filename in enumerate(filenames):
        if filename.split('.')[-1] in ['fts', 'FTS', 'FITS', 'fit', 'FIT']:
            os.rename(filename, '.'.join(filename.split('.')[:-1]) + '.fits')
            filenames[idx] = '.'.join(filename.split('.')[:-1]) + '.fits'
            logging.info('change filename from "%s" to "%s"' %
                         (filename, filenames[idx]))

    # identify keywords for GENERIC telescopes

    # open one sample image file
    hdulist = fits.open(filenames[0],
                        verify='ignore',
                        ignore_missing_end=True)
    header = hdulist[0].header

    # check if this is a single-extension FITS file
    if float(header['NAXIS']) > 2.:
        logging.error('This is not a single-extension FITS file. Please '
                      'extract individual extensions and run them '
                      'individually.')
        sys.exit()

    # keywords that have to be implanted into each image
    implants = {}

    # if GENERIC telescope, ask user for header keywords
    if obsparam['telescope_keyword'] == 'GENERIC':
        keywords = {
            'pixel scale in arcsec/px before binning': 'secpix',
            'binning factor in both axes': 'binning',
            'image center RA (keyword or degrees)': 'ra',
            'image center DEC (keyword or degrees)': 'dec',
            'filter used (clear if none was used)': 'filter',
            'observation midtime': 'date_keyword',
            'exposure time (seconds)': 'exptime'
        }

        for description, keyword in list(keywords.items()):

            try:
                if obsparam[keyword] in header:
                    continue
            except KeyError:
                pass

            inp = input('%s? > ' % description)

            if keyword == 'secpix':
                obsparam[keyword] = (float(inp), float(inp))
            if keyword == 'binning':
                implants['BINX'] = (float(inp), 'PP: user-defined')
                implants['BINY'] = (float(inp), 'PP: user-defined')
            if keyword == 'ra':
                try:
                    implants['OBJCTRA'] = (float(inp), 'PP: user-defined')
                    obsparam['radec_separator'] = 'XXX'
                except ValueError:
                    obsparam['ra'] = inp
                # # check for separator
                # try:
                #     dummy = float(header[inp])
                #     obsparam['radec_separator'] = 'XXX'
                # except ValueError:
                #     if ':' in header[inp]:
                #         obsparam['radec_separator'] = ':'
                #     if ' ' in header[inp].strip():
                #         obsparam['radec_separator'] = ' '
            if keyword == 'dec':
                try:
                    implants['OBJCTDEC'] = (float(inp), 'PP: user-defined')
                    obsparam['radec_separator'] = 'XXX'
                except ValueError:
                    obsparam['dec'] = inp
            if keyword == 'filter':
                implants[obsparam['filter']] = (inp, 'PP: user-defined')
            if keyword == 'date_keyword':
                obsparam['date_keyword'] = inp
            if keyword == 'exptime':
                implants['EXPTIME'] = (float(inp), 'PP: user-defined')

        implants['INSTRUME'] = ('GENERIC', 'PP: manually set')

    # prepare image headers for photometry pipeline

    for filename in filenames:

        if display:
            print('preparing', filename)

        # open image file
        hdulist = fits.open(filename,
                            mode='update',
                            verify='silentfix',
                            ignore_missing_end=True)
        header = hdulist[0].header

        # add other headers, if available
        if len(hdulist) > 1:
            for i in range(len(hdulist)):
                try:
                    header += hdulist[i].header
                except Exception:
                    pass

        # account for flips and rotation in telescope configuration
        # if instrument has several chips...
        if 'chip_id' in obsparam:
            chip_id = header[obsparam['chip_id']]
            this_flipx = obsparam['flipx'][chip_id]
            this_flipy = obsparam['flipy'][chip_id]
            this_rotate = obsparam['rotate'][chip_id]
        # if not...
        else:
            this_flipx = obsparam['flipx']
            this_flipy = obsparam['flipy']
            this_rotate = obsparam['rotate']

        if flipx:
            this_flipx = numpy.invert(this_flipx)
        if flipy:
            this_flipy = numpy.invert(this_flipy)
        if rotate > 0:
            this_rotate += rotate

        # read image data
        imdata = hdulist[0].data

        # check if image is a cube, or a single frame put into a cube
        if len(imdata.shape) > 2:
            # this image is a cube
            if imdata.shape[0] == 1:
                # this is a single image put into a cube
                # turn this into a single-frame fits file
                imdata = imdata[0]
            else:
                # this is really a cube; don't know what to do
                raise TypeError(("%s is a cube FITS file; don't know how to "
                                 "handle this file...") % filename)

        # add header keywords for Source Extractor
        if 'EPOCH' not in header:
            header['EPOCH'] = (2000, 'PP: required for registration')
        # if 'EQUINOX' not in header:
        #     header['EQUINOX'] = (2000, 'PP: required for registration')

        # add header keywords for SCAMP
        header['PHOTFLAG'] = ('F', 'PP: data is not photometric (SCAMP)')
        header['PHOT_K'] = (0.05, 'PP: assumed extinction coefficient')

        if not keep_wcs:

            # remove keywords that might collide with fake wcs
            for key in list(header.keys()):
                if re.match('^CD[12]_[12]', key) is not None:
                    # if key not in obsparam.values():
                    #     header.remove(key)
                    if not toolbox.if_val_in_dict(key, obsparam):
                        header.remove(key)
                elif 'PV' in key and '_' in key:
                    header.remove(key)
                elif key in [
                        'CTYPE1',
                        'CRPIX1',
                        'CRVAL1',
                        'CROTA1',
                        'CROTA2',
                        'CFINT1',
                        'CTYPE2',
                        'CRPIX2',
                        'CRVAL2',
                        'CFINT2',
                        'LTM1_1',
                        'LTM2_2',
                        'WAT0_001',
                        'LTV1',
                        'LTV2',
                        'PIXXMIT',
                        'PIXOFFST',
                        'PC1_1',
                        'PC1_2',
                        'PC2_1',
                        'PC2_2',
                        #'CUNIT1', 'CUNIT2',
                        'A_ORDER',
                        'A_0_0',
                        'A_0_1',
                        'A_0_2',
                        'A_1_0',
                        'A_1_1',
                        'A_2_0',
                        'B_ORDER',
                        'B_0_0',
                        'B_0_1',
                        'B_0_2',
                        'B_1_0',
                        'B_1_1',
                        'B_2_0',
                        'AP_ORDER',
                        'AP_0_0',
                        'AP_0_1',
                        'AP_0_2',
                        'AP_1_0',
                        'AP_1_1',
                        'AP_2_0',
                        'BP_ORDER',
                        'BP_0_0',
                        'BP_0_1',
                        'BP_0_2',
                        'BP_1_0',
                        'BP_1_1',
                        'BP_2_0',
                        'CDELT1',
                        'CDELT2',
                        'CRDELT1',
                        'CRDELT2'
                ]:
                    if not toolbox.if_val_in_dict(key, obsparam):
                        header.remove(key)

            # normalize CUNIT keywords
            try:
                if 'degree' in header['CUNIT1'].lower():
                    header['CUNIT1'] = ('deg')
                if 'degree' in header['CUNIT2'].lower():
                    header['CUNIT2'] = ('deg')
            except KeyError:
                pass

            # if GENERIC telescope, add implants to header
            if obsparam['telescope_keyword'] == 'GENERIC':
                for key, val in list(implants.items()):
                    header[key] = (val[0], val[1])

        # read out image binning mode
        binning = toolbox.get_binning(header, obsparam)

        # add pixel resolution keyword
        header['SECPIXX'] = (obsparam['secpix'][0] * binning[0],
                             'PP: x pixscale after binning')
        header['SECPIXY'] = (obsparam['secpix'][1] * binning[1],
                             'PP: y pixscale after binning')

        # create observation midtime jd
        if not keep_wcs or 'MIDTIMJD' not in header:
            if obsparam['date_keyword'].find('|') == -1:
                header['MIDTIMJD'] = \
                    (toolbox.dateobs_to_jd(header[obsparam['date_keyword']]) +
                     float(header[obsparam['exptime']])/2./86400.,
                     'PP: obs midtime')
            else:
                datetime = (header[obsparam['date_keyword'].split('|')[0]] +
                            'T' +
                            header[obsparam['date_keyword'].split('|')[1]])
                datetime = datetime.replace('/', '-')
                header['MIDTIMJD'] = (
                    toolbox.dateobs_to_jd(datetime) +
                    float(header[obsparam['exptime']]) / 2. / 86400.,
                    'PP: obs midtime')

        # other keywords
        header['TELINSTR'] = (obsparam['telescope_instrument'],
                              'PP: tel/instr name')
        header['TEL_KEYW'] = (obsparam['telescope_keyword'],
                              'PP: tel/instr keyword')

        header[obsparam['filter']] = header[obsparam['filter']].strip()
        header['EXPTIME'] = (header[obsparam['exptime']], 'PP: copied')
        if obsparam['airmass'] in header:
            header['AIRMASS'] = (header[obsparam['airmass']], 'PP: copied')
        else:
            header['AIRMASS'] = (1, 'PP: fake airmass')

        # check if filter can be translated by PP
        try:
            obsparam['filter_translations'][header[obsparam['filter']]]
        except KeyError:
            logging.warning('cannot translate filter keyword \"' +
                            header[obsparam['filter']] + '\"')
            #header[obsparam['filter']] = 'clear'
        header['FILTER'] = (header[obsparam['filter']], 'PP: copied')

        # perform header update
        for key, value in list(header_update.items()):
            if key in header:
                header['_' + key[:6]] = (header[key],
                                         'PP: old value for %s' % key)
            header[key] = (value, 'PP: manually updated')

        # check if OBJECT keyword is available
        if 'OBJECT' not in header:
            header['OBJECT'] = 'None'
        elif len(header['OBJECT'].strip()) == 0:
            header['OBJECT'] = 'None'

        # # check if RA, Dec, airmass headers are available;
        # #   else: query horizons
        # # to get approximate information
        # if (obsparam['ra'] not in header or
        #     obsparam['dec'] not in header or
        #     obsparam['airmass'] not in header):

        #     logging.info('Either RA, Dec, or airmass missing from image ' +
        #                  'header; pull approximate information for Horizons')

        #     # obtain approximate ra and dec (and airmass) from JPL Horizons
        #     eph = callhorizons.query(header[obsparam['object']].
        #                              replace('_', ' '))
        #     eph.set_discreteepochs(header['MIDTIMJD'])
        #     try:
        #         n = eph.get_ephemerides(obsparam['observatory_code'])
        #     except ValueError:
        #         logging.warning('Target (%s) is not an asteroid' %
        #                         header[obsparam['object']])
        #         n = None

        #     if n is None:
        #         raise KeyError((('%s is not an asteroid known '
        #                          'to JPL Horizons') %
        #                          header[obsparam['object']]))

        #     header[obsparam['ra']] = (eph['RA'][0],
        #                               'PP: queried from Horizons')
        #     header[obsparam['dec']] = (eph['DEC'][0],
        #                                'PP: queried from Horizons')
        #     header[obsparam['airmass']] = (eph['airmass'][0],
        #                                    'PP: queried from Horizons')

        # add fake wcs information that is necessary to run SCAMP

        # read out ra and dec from header
        if obsparam['radec_separator'] == 'XXX':
            ra_deg = float(header[obsparam['ra']])
            dec_deg = float(header[obsparam['dec']])
        else:
            ra_string = header[obsparam['ra']].split(
                obsparam['radec_separator'])
            dec_string = header[obsparam['dec']].split(
                obsparam['radec_separator'])
            ra_deg = 15. * (float(ra_string[0]) +
                            old_div(float(ra_string[1]), 60.) +
                            old_div(float(ra_string[2]), 3600.))
            dec_deg = (abs(float(dec_string[0])) +
                       old_div(float(dec_string[1]), 60.) +
                       old_div(float(dec_string[2]), 3600.))
            if dec_string[0].find('-') > -1:
                dec_deg = -1 * dec_deg

        # transform to equinox J2000, if necessary
        if 'EQUINOX' in header:
            equinox = float(header['EQUINOX'])
            if equinox != 2000.:
                anyeq = SkyCoord(ra=ra_deg * u.deg,
                                 dec=dec_deg * u.deg,
                                 frame=FK5,
                                 equinox=Time(equinox,
                                              format='jyear',
                                              scale='utc'))
                coo = anyeq.transform_to(ICRS)
                ra_deg = coo.ra.deg
                dec_deg = coo.dec.deg
                header['EQUINOX'] = (2000.0, 'PP: normalized to ICRS')

        if man_ra is not None and man_dec is not None:
            ra_deg = float(man_ra)
            dec_deg = float(man_dec)

        # special treatment for UKIRT/WFCAM
        if obsparam['telescope_keyword'] == 'UKIRTWFCAM':
            try:
                ra_deg = (float(header['TELRA']) / 24. * 360. -
                          old_div(float(header['JITTER_X']), 3600.))
                dec_deg = (float(header['TELDEC']) -
                           old_div(float(header['JITTER_Y']), 3600.))
            except KeyError:
                # JITTER keywords not in combined images
                pass

        # apply flips
        xnorm, ynorm = 1, 1
        if this_flipx:
            xnorm = -1
        if this_flipy:
            ynorm = -1

        # check if instrument has a chip offset
        ra_offset, dec_offset = 0, 0
        if (man_ra is None or man_dec is None) and \
           'chip_offset_fixed' in obsparam:
            cid = header[obsparam['chip_id']]
            ra_offset = float(obsparam['chip_offset_fixed'][cid][0])
            dec_offset = float(obsparam['chip_offset_fixed'][cid][1])

        if not keep_wcs:
            # create fake header
            header['RADECSYS'] = ('FK5', 'PP: fake wcs coordinates')
            header['RADESYS'] = ('FK5', 'PP: fake wcs coordinates')
            header['CTYPE1'] = ('RA---TAN', 'PP: fake Coordinate type')
            header['CTYPE2'] = ('DEC--TAN', 'PP: fake Coordinate type')
            header['CRVAL1'] = (ra_deg + ra_offset,
                                'PP: fake Coordinate reference value')
            header['CRVAL2'] = (dec_deg + dec_offset,
                                'PP: fake Coordinate reference value')
            header['CRPIX1'] = (int(
                old_div(float(header[obsparam['extent'][0]]),
                        2)), 'PP: fake Coordinate reference pixel')
            header['CRPIX2'] = (int(
                old_div(float(header[obsparam['extent'][1]]),
                        2)), 'PP: fake Coordinate reference pixel')

            # plugin default distortion parameters, if available
            if 'distort' in obsparam:
                if 'functionof' in obsparam['distort']:
                    pv_dict = obsparam['distort'][header[obsparam['distort']
                                                         ['functionof']]]
                else:
                    pv_dict = obsparam['distort']

                try:
                    for pv_key, pv_val in pv_dict.items():
                        header[pv_key] = (pv_val, 'PP: default distortion')
                except KeyError:
                    logging.error(('No distortion coefficients available for '
                                   '%s %s') %
                                  (obsparam['distort']['functionof'],
                                   header[obsparam['distort']['functionof']]))

            header['CD1_1'] = (xnorm *
                               numpy.cos(this_rotate / 180. * numpy.pi) *
                               obsparam['secpix'][0] * binning[0] / 3600.,
                               'PP: fake Coordinate matrix')
            header['CD1_2'] = (ynorm *
                               -numpy.sin(this_rotate / 180. * numpy.pi) *
                               obsparam['secpix'][1] * binning[1] / 3600.,
                               'PP: fake Coordinate matrix')
            header['CD2_1'] = (xnorm *
                               numpy.sin(this_rotate / 180. * numpy.pi) *
                               obsparam['secpix'][0] * binning[0] / 3600.,
                               'PP: fake Coordinate matrix')
            header['CD2_2'] = (ynorm *
                               numpy.cos(this_rotate / 180. * numpy.pi) *
                               obsparam['secpix'][1] * binning[1] / 3600.,
                               'PP: fake Coordinate matrix')

        # crop center from LOWELL42 frames
        if obsparam['telescope_keyword'] == 'LOWELL42':
            imdata = imdata[100:-100, 100:-100]
            logging.info('cropping LOWELL42 data')

        # overwrite imdata in case something has been modified
        hdulist[0].data = imdata

        hdulist.flush()
        hdulist.close()

        logging.info('created fake wcs information for image %s' % filename)

    # create diagnostics
    if diagnostics:
        diag.create_index(filenames, os.getcwd(), obsparam, display)

    logging.info('Done! -----------------------------------------------------')

    return None
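
# Aside: a minimal, self-contained sketch of the fake CD-matrix construction
# used above (rotation, flip, pixel-scale and binning values here are
# illustrative assumptions, not pipeline defaults).
import numpy

def fake_cd_matrix(rotate_deg, xnorm, ynorm, secpix, binning):
    """Return the CD matrix (deg/pixel) for a rotated, flipped detector."""
    theta = numpy.radians(rotate_deg)
    scale_x = secpix[0] * binning[0] / 3600.  # arcsec/pixel -> deg/pixel
    scale_y = secpix[1] * binning[1] / 3600.
    return numpy.array(
        [[xnorm * numpy.cos(theta) * scale_x,
          ynorm * -numpy.sin(theta) * scale_y],
         [xnorm * numpy.sin(theta) * scale_x,
          ynorm * numpy.cos(theta) * scale_y]])

# example: unbinned 0.5"/pixel detector, no rotation, x-axis flipped
print(fake_cd_matrix(0., -1, 1, (0.5, 0.5), (1, 1)))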
Exemplo n.º 45
0
import  os
import  pylab               as      pl
import  numpy               as      np
import  astropy.io.fits     as      fits
 
from    astropy.table       import  Table, join, Column 
from    desitarget.targets  import  encode_targetid
from    phot_sys            import  set_photsys
from    make_tiny           import  make_tiny


print('\n\nWelcome.\n\n')

scratch       = os.environ['CSCRATCH']

##
hsc           = Table(fits.open(scratch + '/BGS/SV-ASSIGN/elgs/hsc_north.fits')[1].data)
hsc           = hsc[hsc['DEC'] > -30.]
hsc           = set_photsys(hsc)
hsc           = hsc[hsc['PHOTSYS'] == 'BMZLS']

# hsc.columns lists everything available; keep only the columns we use
cols          = ['TARGETID', 'RA','DEC', 'DCHISQ', 'EBV', 'FLUX_G', 'FLUX_R', 'FLUX_Z', 'NOBS_G','NOBS_R','NOBS_Z', 'SHAPEEXP_R', 'TYPE','mizuki_photoz_best','FRANKENZ','PHOTSYS', 'PSFSIZE_G', 'PSFSIZE_R', 'PSFSIZE_Z']

hsc           = hsc[cols]
hsc           = hsc[hsc['TYPE'] == 'REX']
hsc['MIZUKI'] = hsc['mizuki_photoz_best']
hsc           = hsc[hsc['SHAPEEXP_R'] > 2.5]

urls          = ['http://viewer.legacysurvey.org/?ra={:.4f}&dec={:.4f}&zoom=16&layer=dr8'.format(i['RA'], i['DEC']) for i in hsc]
urls          = [make_tiny(url) for url in urls]
hsc['TURL']   =  urls
Exemplo n.º 46
0
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
from utils import *
import scipy.ndimage.morphology as sp
from matplotlib.colors import LogNorm
import csv
import pandas as pd
from scipy.optimize import curve_fit

hdulist = fits.open("A1_mosaic.fits")
magzpt = hdulist[0].header['MAGZPT']
magzrr = hdulist[0].header['MAGZRR']
image = hdulist[0].data


def linfit(x, a, b):
    return (a * x + b)


"""
fluxmaster = []
for i in range(100,1500,200):
    imageslice = image[100:300, i:i+200]
    fluxarray = em.fluxarray(imageslice, Rohan=True, im_show=True)
    for j in fluxarray:
        fluxmaster.append(j)
print(fluxmaster)

"""
d1 = pd.read_csv("catalogue.csv")
Exemplo n.º 47
0
'''
This is a Python procedure to read the galaxy file and add an AGN to it
(multiple times). Because the AGN is variable, its variability can be
simulated with this model.
'''

from astropy.io import fits
import matplotlib.pyplot as plt
from glob import glob
import numpy as np

s = np.zeros(15)
x = np.arange(0, 15)

#To open the galaxy.fits file and introduce a gaussian psf (the AGN ) inside it.
# 15 night continuous observations.

galaxy = fits.open('gal_moffat.fits')
gal_data = galaxy[0].data

#15 night continuous AGN observations with small variability in the amplitude.

t = glob('agn*.fits')
for i in range(0, 15):
    k = t[i]
    agn = fits.open(k)
    agn_data = agn[0].data
    com = gal_data + agn_data
    print('%i th image being processed' % i)
    new = fits.PrimaryHDU(com)
    myfit = fits.HDUList([new])
    myfit.writeto('com%i.fits' % i, overwrite=True)
    myfit.close()
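
# Aside: the 'agn*.fits' frames above are assumed to each hold a Gaussian
# PSF of varying amplitude. A minimal sketch for generating one such frame
# (array size, centre, amplitude and width are illustrative assumptions):
ny, nx = 128, 128
yy, xx = np.mgrid[0:ny, 0:nx]
amplitude, x0, y0, sigma = 1000.0, nx / 2, ny / 2, 3.0
psf = amplitude * np.exp(-((xx - x0)**2 + (yy - y0)**2) / (2 * sigma**2))
fits.PrimaryHDU(psf).writeto('agn_example.fits', overwrite=True)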
Exemplo n.º 48
0
    """return the key of dictionary dic given the value"""
    return [k for k, v in dic.iteritems() if v == val]

filein       = sys.argv[1]
filename, ext = os.path.splitext(filein)
if ext == ".gz":
    filename = os.path.splitext(filename)[0]
fileout      =  filename + "_filtered.fits"

print(filename)

# Binning of the resulting spectrum (in keV)
newbinsize = 1.0
N_primaries = float(filein.split(".")[0].split("_")[1])

hdulist   = pyfits.open(filein)
hdr       = hdulist[1].header
data      = hdulist[1].data
eventid   = data.field("EventID")
endep     = data.field("En_dep")
X_ID      = data.field("X_ID")
Y_ID      = data.field("Y_ID")
theta     = data.field("Theta_Primary")
phi       = data.field("Phi_Primary")
hdulist.close()

# Initialize output arrays
eventid_out   = []
endep_out     = []
theta_out     = []
phi_out       = []
Exemplo n.º 49
0
def keppixseries(infile,
                 outfile=None,
                 plotfile=None,
                 plottype='global',
                 filterlc=False,
                 function='boxcar',
                 cutoff=1.0,
                 overwrite=False,
                 verbose=False,
                 logfile='keppixseries.log'):
    """
    keppixseries -- individual time series photometry for all pixels within a
    target mask

    keppixseries plots a light curve for each individual pixel in a target
    mask. Light curves are extracted from a target pixel file obtained from the
    Kepler data archive at MAST. If required, the data can be fed through a
    boxcar, gaussian or sinc function high bandpass filter in order to remove
    low frequency signal from the data. keppixseries is a diagnostic tool for
    identifying source contaminants in the background or foreground of the
    target. It can be employed to identify pixels for inclusion or exclusion
    when re-extracting a Kepler light curve from target pixel files.

    Parameters
    ----------
    infile : str
        The name of a MAST standard format FITS file containing Kepler Target
        Pixel data within the first data extension.
    outfile : str
        The name of the output FITS file. This file has two data extensions.
        The first called 'PIXELSERIES' contains a table with columns of
        barycenter-corrected time, barycenter time correction, cadence number,
        cadence quality flag and a series of photometric light curves, one for
        each pixel within the target mask. Each pixel is labeled COLx_ROWy,
        where :math:`x` is the pixel column number and :math:`y` is the pixel
        row number on the CCD module/output. The second extension contains the
        mask definition map copied directly from the input target pixel file.
    plotfile : str
        Name of an optional diagnostic output plot file containing the results
        of keppixseries. An example is provided in Figure 1. Typically this is
        a PNG format file. If no diagnostic file is required, plotfile can be
        'None'. The plot will be generated regardless of the value of this
        field, but the plot will not be saved to a file if ``plotfile='None'``.
    plottype : str
        keppixseries can plot light curves of three types.
        The choice is made using this argument. The options are:

        * local - All individual pixel light curves are scaled separately to
          provide the most dynamic range for each pixel.
        * global - All pixel light curves are scaled between zero and the
          maximum flux attained by the brightest pixel in the mask. This option
          provides the relative contribution to the archived light curve by each
          pixel.
        * full - All pixels light curves are scaled between zero and the
          maximum flux attained by that pixel. This provides the fraction of
          variability within each individual pixel.
    filterlc : bool
        If True, the light curve for each pixel will be treated by a high
        band-pass filter to remove long-term trends from e.g. differential
        velocity aberration.
    function : str
        The functional form of the high pass-band filter:

        * boxcar
        * gauss
        * sinc
    cutoff : float
        The frequency of the high pass-band cutoff in units of :math:`days^{-1}`.
    overwrite : bool
        Overwrite the output file?
    verbose : bool
        Print informative messages and warnings to the shell and logfile?
    logfile : str
        Name of the logfile containing error and warning messages.

    Examples
    --------
    .. code-block :: bash

        $ keppixseries kplr008256049-2010174085026_lpd-targ.fits.gz

    .. image:: ../_static/images/api/keppixseries.png
        :align: center
    """
    if outfile is None:
        outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
    # log the call
    hashline = '--------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = ('KEPPIXSERIES -- ' + ' infile={}'.format(infile) +
            ' outfile={}'.format(outfile) + ' plotfile={}'.format(plotfile) +
            ' plottype={}'.format(plottype) + ' filterlc={}'.format(filterlc) +
            ' function={}'.format(function) + ' cutoff={}'.format(cutoff) +
            ' overwrite={}'.format(overwrite) + ' verbose={}'.format(verbose) +
            ' logfile={}'.format(logfile))
    kepmsg.log(logfile, call + '\n', verbose)

    # start time
    kepmsg.clock('KEPPIXSERIES started at', logfile, verbose)

    # overwrite output file
    if overwrite:
        kepio.overwrite(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        errmsg = ('ERROR -- KEPPIXSERIES: {} exists. Use --overwrite'.format(
            outfile))
        kepmsg.err(logfile, errmsg, verbose)

    # open TPF FITS file
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, barytime = \
        kepio.readTPF(infile, 'TIME', logfile, verbose)
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, tcorr = \
        kepio.readTPF(infile, 'TIMECORR', logfile, verbose)
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, cadno = \
        kepio.readTPF(infile, 'CADENCENO', logfile, verbose)
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, fluxpixels = \
        kepio.readTPF(infile, 'FLUX', logfile, verbose)
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, errpixels = \
        kepio.readTPF(infile, 'FLUX_ERR', logfile, verbose)
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, qual = \
        kepio.readTPF(infile, 'QUALITY', logfile, verbose)

    # read mask definition data from TPF file
    maskimg, pixcoord1, pixcoord2 = kepio.readMaskDefinition(
        infile, logfile, verbose)
    # print target data
    print('')
    print('      KepID: {}'.format(kepid))
    print(' RA (J2000): {}'.format(ra))
    print('Dec (J2000): {}'.format(dec))
    print('     KepMag: {}'.format(kepmag))
    print('   SkyGroup: {}'.format(skygroup))
    print('     Season: {}'.format(season))
    print('    Channel: {}'.format(channel))
    print('     Module: {}'.format(module))
    print('     Output: {}'.format(output))
    print('')
    # how many quality = 0 rows?
    npts = 0
    nrows = len(fluxpixels)
    for i in range(nrows):
        if (qual[i] == 0 and np.isfinite(barytime[i])
                and np.isfinite(fluxpixels[i, ydim * xdim // 2])):
            npts += 1
    time = np.empty((npts))
    timecorr = np.empty((npts))
    cadenceno = np.empty((npts))
    quality = np.empty((npts))
    pixseries = np.empty((ydim, xdim, npts))
    errseries = np.empty((ydim, xdim, npts))

    # construct output light curves
    nptsx = 0
    for i in tqdm(range(ydim)):
        for j in range(xdim):
            npts = 0
            for k in range(nrows):
                if (qual[k] == 0 and np.isfinite(barytime[k])
                        and np.isfinite(fluxpixels[k, int(ydim * xdim / 2)])):
                    time[npts] = barytime[k]
                    timecorr[npts] = tcorr[k]
                    cadenceno[npts] = cadno[k]
                    quality[npts] = qual[k]
                    pixseries[i, j, npts] = fluxpixels[k, nptsx]
                    errseries[i, j, npts] = errpixels[k, nptsx]
                    npts += 1
            nptsx += 1
    # define data sampling
    if filterlc:
        tpf = pyfits.open(infile)
        cadence = kepkey.cadence(tpf[1], infile, logfile, verbose)
        tr = 1.0 / (cadence / 86400)
        timescale = 1.0 / (cutoff / tr)

        # define convolution function
        if function == 'boxcar':
            filtfunc = np.ones(int(np.ceil(timescale)))
        elif function == 'gauss':
            timescale /= 2
            dx = int(np.ceil(timescale * 10 + 1))
            fx = np.linspace(0, dx - 1, dx)
            # Gaussian kernel centred on the window; replaces the original's
            # broken self-referencing call to the not-yet-defined `filtfunc`
            filtfunc = np.exp(-0.5 * ((fx - (dx / 2 - 1.0)) / timescale)**2)
        elif function == 'sinc':
            dx = int(np.ceil(timescale * 12 + 1))
            fx = np.linspace(0, dx - 1, dx)
            fx = fx - dx / 2 + 0.5
            fx /= timescale
            filtfunc = np.sinc(fx)
        filtfunc /= np.sum(filtfunc)

        # pad time series at both ends with noise model
        for i in range(ydim):
            for j in range(xdim):
                ave, sigma = (np.mean(pixseries[i, j, :len(filtfunc)]),
                              np.std(pixseries[i, j, :len(filtfunc)]))
                padded = np.append(
                    kepstat.randarray(
                        np.ones(len(filtfunc)) * ave,
                        np.ones(len(filtfunc)) * sigma), pixseries[i, j, :])
                ave, sigma = (np.mean(pixseries[i, j, -len(filtfunc):]),
                              np.std(pixseries[i, j, -len(filtfunc):]))
                padded = np.append(
                    padded,
                    kepstat.randarray(
                        np.ones(len(filtfunc)) * ave,
                        np.ones(len(filtfunc)) * sigma))
                # convolve data
                convolved = np.convolve(padded, filtfunc, 'same')
                # remove padding from the output array
                outdata = convolved[len(filtfunc):-len(filtfunc)]
                # subtract low frequencies
                outmedian = np.median(outdata)
                pixseries[i, j, :] = pixseries[i, j, :] - outdata + outmedian

    # construct output file
    print("Writing output file {}...".format(outfile))
    if ydim * xdim < 1000:
        instruct = pyfits.open(infile, 'readonly')
        kepkey.history(call, instruct[0], outfile, logfile, verbose)
        hdulist = pyfits.HDUList(instruct[0])
        cols = []
        cols.append(
            pyfits.Column(name='TIME',
                          format='D',
                          unit='BJD - 2454833',
                          disp='D12.7',
                          array=time))
        cols.append(
            pyfits.Column(name='TIMECORR',
                          format='E',
                          unit='d',
                          disp='E13.6',
                          array=timecorr))
        cols.append(
            pyfits.Column(name='CADENCENO',
                          format='J',
                          disp='I10',
                          array=cadenceno))
        cols.append(pyfits.Column(name='QUALITY', format='J', array=quality))
        for i in range(ydim):
            for j in range(xdim):
                colname = 'COL{}_ROW{}'.format(i + column, j + row)
                cols.append(
                    pyfits.Column(name=colname,
                                  format='E',
                                  disp='E13.6',
                                  array=pixseries[i, j, :]))
        hdu1 = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
        # copy ancillary keywords from the input extension header, skipping
        # any that are absent (replaces the original's long run of
        # per-keyword try/except blocks)
        hdu1.header['INHERIT'] = (True, 'inherit the primary header')
        hdu1.header['EXTNAME'] = ('PIXELSERIES', 'name of extension')
        keywords_to_copy = [
            ('EXTVER', 'extension version number (not format version)'),
            ('TELESCOP', 'telescope'),
            ('INSTRUME', 'detector type'),
            ('OBJECT', 'string version of KEPLERID'),
            ('KEPLERID', 'unique Kepler target identifier'),
            ('RADESYS', 'reference frame of celestial coordinates'),
            ('RA_OBJ', '[deg] right ascension from KIC'),
            ('DEC_OBJ', '[deg] declination from KIC'),
            ('EQUINOX', 'equinox of celestial coordinate system'),
            ('TIMEREF', 'barycentric correction applied to times'),
            ('TASSIGN', 'where time is assigned'),
            ('TIMESYS', 'time system is barycentric JD'),
            ('BJDREFI', 'integer part of BJD reference date'),
            ('BJDREFF', 'fraction of the day in BJD reference date'),
            ('TIMEUNIT', 'time unit for TIME, TSTART and TSTOP'),
            ('TSTART', 'observation start time in BJD-BJDREF'),
            ('TSTOP', 'observation stop time in BJD-BJDREF'),
            ('LC_START', 'mid point of first cadence in MJD'),
            ('LC_END', 'mid point of last cadence in MJD'),
            ('TELAPSE', '[d] TSTOP - TSTART'),
            ('LIVETIME', '[d] TELAPSE multiplied by DEADC'),
            ('EXPOSURE', '[d] time on source'),
            ('DEADC', 'deadtime correction'),
            ('TIMEPIXR', 'bin time beginning=0 middle=0.5 end=1'),
            ('TIERRELA', '[d] relative time error'),
            ('TIERABSO', '[d] absolute time error'),
            ('INT_TIME', '[s] photon accumulation time per frame'),
            ('READTIME', '[s] readout time per frame'),
            ('FRAMETIM', '[s] frame time (INT_TIME + READTIME)'),
            ('NUM_FRM', 'number of frames per time stamp'),
            ('TIMEDEL', '[d] time resolution of data'),
            ('DATE-OBS', 'TSTART as UTC calendar date'),
            ('DATE-END', 'TSTOP as UTC calendar date'),
            ('BACKAPP', 'background is subtracted'),
            ('DEADAPP', 'deadtime applied'),
            ('VIGNAPP', 'vignetting or collimator correction applied'),
            ('GAIN', '[electrons/count] channel gain'),
            ('READNOIS', '[electrons] read noise'),
            ('NREADOUT', 'number of read per cadence'),
            ('TIMSLICE', 'time-slice readout sequence section'),
            ('MEANBLCK', '[count] FSW mean black level'),
        ]
        for keyword, comment in keywords_to_copy:
            try:
                hdu1.header[keyword] = (instruct[1].header[keyword], comment)
            except KeyError:
                pass
        hdulist.append(hdu1)
        hdulist.writeto(outfile)
        kepkey.new('EXTNAME', 'APERTURE', 'name of extension', instruct[2],
                   outfile, logfile, verbose)
        pyfits.append(outfile, instruct[2].data, instruct[2].header)
        instruct.close()
    else:
        warnmsg = ('WARNING -- KEPPIXSERIES: output FITS file requires > 999 '
                   'columns. Non-compliant with FITS convention.')
        kepmsg.warn(logfile, warnmsg)

    # plot pixel array
    fmin = 1.0e33
    fmax = -1.0e33
    plt.figure()
    plt.clf()
    dx = 0.93 / xdim
    dy = 0.94 / ydim
    ax = plt.axes([0.06, 0.05, 0.93, 0.94])
    plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
    plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
    plt.gca().xaxis.set_major_locator(plt.MaxNLocator(integer=True))
    plt.gca().yaxis.set_major_locator(plt.MaxNLocator(integer=True))
    plt.xlim(np.min(pixcoord1) - 0.5, np.max(pixcoord1) + 0.5)
    plt.ylim(np.min(pixcoord2) - 0.5, np.max(pixcoord2) + 0.5)
    plt.xlabel('time', {'color': 'k'})
    plt.ylabel('arbitrary flux', {'color': 'k'})
    for i in range(ydim):
        for j in range(xdim):
            tmin = np.amin(time)
            tmax = np.amax(time)
            try:
                if (np.isfinite(np.amin(pixseries[i, j, :])) and
                        np.isfinite(np.amax(pixseries[i, j, :]))):
                    fmin = np.amin(pixseries[i, j, :])
                    fmax = np.amax(pixseries[i, j, :])
            except ValueError:
                pass
            xmin = tmin - (tmax - tmin) / 40
            xmax = tmax + (tmax - tmin) / 40
            ymin = fmin - (fmax - fmin) / 20
            ymax = fmax + (fmax - fmin) / 20
            if kepstat.bitInBitmap(maskimg[i, j], 2):
                plt.axes([0.06 + float(j) * dx, 0.05 + i * dy, dx, dy],
                         facecolor='lightslategray')
            elif maskimg[i, j] == 0:
                plt.axes([0.06 + float(j) * dx, 0.05 + i * dy, dx, dy],
                         facecolor='black')
            else:
                plt.axes([0.06 + float(j) * dx, 0.05 + i * dy, dx, dy])
            # tick labels are suppressed on every sub-panel
            plt.setp(plt.gca(), xticklabels=[], yticklabels=[])
            ptime = time * 1.0
            ptime = np.insert(ptime, [0], ptime[0])
            ptime = np.append(ptime, ptime[-1])
            pflux = pixseries[i, j, :] * 1.0
            pflux = np.insert(pflux, [0], -1000.0)
            pflux = np.append(pflux, -1000.0)
            plt.plot(time,
                     pixseries[i, j, :],
                     color='#0000ff',
                     linestyle='-',
                     linewidth=0.5)
            if not kepstat.bitInBitmap(maskimg[i, j], 2):
                plt.fill(ptime,
                         pflux,
                         fc='lightslategray',
                         linewidth=0.0,
                         alpha=1.0)
            plt.fill(ptime, pflux, fc='#FFF380', linewidth=0.0, alpha=1.0)
            if 'loc' in plottype:
                plt.xlim(xmin, xmax)
                plt.ylim(ymin, ymax)
            if 'glob' in plottype:
                plt.xlim(xmin, xmax)
                plt.ylim(1.0e-10, np.nanmax(pixseries) * 1.05)
            if 'full' in plottype:
                plt.xlim(xmin, xmax)
                plt.ylim(1.0e-10, ymax * 1.05)

    # render plot; save before show so the figure is not blanked, and skip
    # saving when plotfile is None (per the docstring)
    if plotfile is not None and str(plotfile).lower() != 'none':
        plt.savefig(plotfile)
    plt.show()

    # stop time
    kepmsg.clock('KEPPIXSERIES ended at', logfile, verbose)
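
# A minimal usage sketch, assuming the pyke helper modules (kepio, kepmsg,
# kepkey, kepstat) are importable and the docstring's example target pixel
# file is present locally:
keppixseries('kplr008256049-2010174085026_lpd-targ.fits.gz',
             plottype='global', filterlc=True, function='boxcar',
             cutoff=1.0, overwrite=True, verbose=True)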
Exemplo n.º 50
0
def read_files(filter, basedir):
    """ Read in the files for a given filter and base directory.
    The structure of the directory is such that within this directory there
    are cat/ and psf/ sub-directories containing the relevant catalogs

    Note: Do not include the final slash!
    e.g. on NERSC the basedir looks like either
    /global/cscratch1/sd/amichoi/UltraVISTA
    or
    /global/cscratch1/sd/amichoi/VIDEO
    """
    survey = os.path.split(basedir)[1]
    print(survey)

    # First read in the SExtractor for PSFEx catalog
    if survey == 'UltraVISTA':
        sex_file = os.path.join(basedir,
                                'cat/UVISTA_%s_21_01_16_psfcat.fits' % filter)
    elif survey == 'VIDEO':
        sex_file = os.path.join(
            basedir, 'cat/VIDEO_%s_10_36.80_-5.01_psfcat.fits' % filter)

    dat = fits.open(sex_file)
    cols = dat[2].columns
    #print(cols)
    sex = Table(dat[2].data)
    print("Length of sex file: ", len(sex))

    #read in catalog containing list of stars made from Sextractor and PSFEx
    if survey == 'UltraVISTA':
        star_file = os.path.join(
            basedir,
            'psf_minsn500/UVISTA_%s_21_01_16_psfex-starlist.fits' % filter)
    elif survey == 'VIDEO':
        star_file = os.path.join(
            basedir,
            'psf/VIDEO_%s_10_36.80_-5.01_psfex-starlist.fits' % (filter))

    dat = fits.open(star_file)
    cols = dat[2].columns
    #print(cols)
    star = Table(dat[2].data)
    print("Length of star file: ", len(star))

    sex['X_IMAGE'] = sex['X_IMAGE'].astype(int)
    star['X_IMAGE'] = star['X_IMAGE'].astype(int)
    sex['Y_IMAGE'] = sex['Y_IMAGE'].astype(int)
    star['Y_IMAGE'] = star['Y_IMAGE'].astype(int)
    sexstarmerge = join(sex,
                        star,
                        keys=['X_IMAGE', 'Y_IMAGE'],
                        join_type='inner')

    #sexstarmerge = join(sex, star, join_type='inner')
    print("length of merged cat: ", len(sexstarmerge))

    cols = tuple(name for name in sexstarmerge.colnames
                 if len(sexstarmerge[name].shape) <= 1)
    t2 = sexstarmerge[cols]
    sexstardf = t2.to_pandas()

    return sexstarmerge, sex, star
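
# A minimal usage sketch, using the NERSC base directory quoted in the
# docstring; the 'J' filter is an illustrative assumption:
sexstarmerge, sex, star = read_files(
    'J', '/global/cscratch1/sd/amichoi/UltraVISTA')
print('merged/sex/star rows:', len(sexstarmerge), len(sex), len(star))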
Exemplo n.º 51
0
import os

from time import strftime, gmtime
from astropy.io import fits

## specify source files
path_to_cal = 'ArchCal/' # source directory
dates = [f for f in os.listdir(path_to_cal) if not f.startswith('.')] # index date folders in ArchCal
path_to_cal += max(dates)+'/' # specify path as most recent date
filenames = [f for f in os.listdir(path_to_cal) if not f.startswith('.')] # list of filenames to process

print('Searching %s for calibration files...' % path_to_cal)

bias,dark,Red,Green,Blue,R,V,B,Halpha,Lum,filters = [],[],[],[],[],[],[],[],[],[],[] # initialize lists
# lists are used to store the data for each calibration file and then combine into a master

## sort the calibration images by type and store them in arrays
for filename in filenames:
    img = fits.open(path_to_cal+filename) # open each image
    data = img[0].data # split into data and header
    header = img[0].header
    if header['IMAGETYP']=='Bias Frame':
        bias_header = img[0].header # save header for each image type to attach to master version
        bias.append(data) # add data array to type list
    if header['IMAGETYP']=='Dark Frame':
        dark_header = img[0].header
        dark.append(data)
    if header['IMAGETYP']=='Flat Field':
        flat_header = img[0].header
        filters.append(header['FILTER']) # store the filters found in this directory in a list
        # so that we don't attempt to create new master flats with filters we did not have raw flats for
        code = header['FILTER']+'.append(data)' # string operations to add data to filter-specific list
        exec(code)
    del img
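
# Aside: the exec() call above can be avoided by keeping the per-filter
# lists in a dictionary keyed on the FILTER header value (a sketch assuming
# the same filter names as the lists initialized above):
flats = {name: [] for name in
         ['Red', 'Green', 'Blue', 'R', 'V', 'B', 'Halpha', 'Lum']}
# then, inside the loop over flat fields:
#     flats[header['FILTER']].append(data)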
Exemplo n.º 52
0
# (imports reconstructed for this truncated snippet)
import glob
import sys

from astropy.io import fits

result = []
hAlphaFlux = []
bands = ['_F660_', '_I_', '_R_']
coef_F660 = 125.3
coef_R = 1419.0
fnu_R = 0

for file in glob.glob("*_swp.fits"):

    for i in range(len(bands)):
        if bands[i] in file:
            mainOutput = sys.stdout

            fileName = "FILENAME  =  " + file
            openFile = fits.open(file)
            header = openFile[1].header
            data = openFile[1].data
            header = str(header)
            fileNameList = []
            fileNameList.append(fileName)
            headerList = []
            headerList.append(header)
            dataList = []
            dataList.append(data)
            result.append(fileNameList + headerList + dataList)

for i in range(len(result)):

    fileName2 = str(result[i])
Exemplo n.º 53
0
def load_images(images,
                ra,
                dec,
                wcs_dict,
                orbit=None,
                dra=None,
                ddec=None,
                target=DS9_NAME,
                regions=None,
                rejected=False,
                basedate=None):
    """
    Load a list of images into a ds9 session.

    Note: orbit overrides what is in ra/dec/dra/ddec

    :param images: list of image filenames (full path)
    :param wcs_dict: Dictionary of WCS objects (wcs_dict[images[0]] will be WCS associated with image in frame 1)
    :param orbit: a BKOrbit object used to put the circle on the target based on the MJD of the image.
    :param ra: RA of source being measured (deg; used to put a circle on the ds9 image)
    :param dec: DEC of the source being measured (deg; used to put a circle on the ds9 image)
    :param dra: draw an error ellipse at ra/dec using dra/ddec sizes if no orbit given.
    :param ddec: draw an error ellipse at ra/dec using dra/ddec sizes if no orbit given.
    :param target: name of the ds9 session to put images into (aka 'validate')
    :param regions: name of a file that holds a list of regions to be marked on the display.
    :param rejected: Is this a rejected / null observation.
    :param basedate: date that the ra/dec is valid for (aka epoch of coordinate)
    :return:
    """
    ds9 = get_ds9(target)
    ds9.set('frame delete all')
    ds9.set('zscale')
    for image in images:
        ds9.set('frame new')
        ds9.set(f"file {image}")
        with fits.open(image) as hdulist:
            header = hdulist[1].header
            obsdate = Time(hdulist[0].header['DATE-AVG'], scale='tai').utc
            if basedate is None:
                basedate = obsdate
            wcs_dict[image] = WCS(header)
            colour = 'yellow'
            if orbit is not None:
                orbit.predict(obsdate)
                ra1 = orbit.coordinate.ra.degree
                dec1 = orbit.coordinate.dec.degree
                uncertainty_ellipse = (orbit.dra.to('arcsec').value,
                                       orbit.ddec.to('arcsec').value,
                                       orbit.pa.to('degree').value + 90)
                colour = 'green'
                for obs in orbit.observations:
                    record_index = obs.date.mpc[0:13]
                    if record_index == obsdate.mpc[0:13]:
                        colour = 'red' if obs.null_observation else 'cyan'
                        circle = f'icrs; circle({obs.coordinate.ra.degree}d,{obs.coordinate.dec.degree}d,5p) # color={colour}'
                        ds9.set('regions', circle)
                        break
            else:
                ra1 = ra + dra * (obsdate - basedate).to('hour').value / 3600.
                dec1 = dec + ddec * (obsdate -
                                     basedate).to('hour').value / 3600.0
                uncertainty_ellipse = 3, 3, 0
                colour = 'pink'
            colour = 'red' if rejected else colour
            ds9.set(
                'regions', f'icrs; ellipse({ra1},{dec1},'
                f'{uncertainty_ellipse[0]}",'
                f'{uncertainty_ellipse[1]}",'
                f'{uncertainty_ellipse[2]}) # color={colour}')
            ds9.set(f'pan to {ra1} {dec1} wcs icrs')
            logging.debug(f'Loading regions from {regions}')
            if regions is not None:
                ds9.set(f'regions {regions}')
    ds9.set('frame match wcs')
    ds9.set('frame first')
Exemplo n.º 54
0
def measure_strehl(HDUlist_or_filename=None,
                   ext=0,
                   slice=0,
                   center=None,
                   display=True,
                   verbose=True,
                   cache_perfect=False):
    """ Estimate the Strehl ratio for a PSF.

    This requires computing a simulated PSF with the same
    properties as the one under analysis.

    Note that this calculation will not be very accurate unless both PSFs are well sampled,
    preferably several times better than Nyquist. See
    `Roberts et al. 2004 SPIE 5490 <http://adsabs.harvard.edu/abs/2004SPIE.5490..504R>`_
    for a discussion of the various possible pitfalls when calculating Strehl ratios.

    WARNING: This routine attempts to infer how to calculate a perfect reference
    PSF based on FITS header contents. It will likely work for simple direct imaging
    cases with WebbPSF but will not work (yet) for more complicated cases such as
    coronagraphy, anything with image or pupil masks, etc. Code contributions to add
    such cases are welcomed.


    Parameters
    ----------
    HDUlist_or_filename : string
        Either a fits.HDUList object or a filename of a FITS file on disk
    ext : int
        Extension in that FITS file
    slice : int, optional
        If that extension is a 3D datacube, which slice (plane) of that datacube to use
    center : tuple
        center to compute around.  Default is image center. If the center is on the
        crosshairs between four pixels, then the mean of those four pixels is used.
        Otherwise, if the center is in a single pixel, then that pixel is used.
    verbose, display : bool
        control whether to print the results or display plots on screen.

    cache_perfect : bool
        use caching for perfect images? greatly speeds up multiple calcs w/ same config

    Returns
    ---------
    strehl : float
        Strehl ratio as a floating point number between 0.0 - 1.0

    """

    from .webbpsf_core import Instrument
    from poppy import display_PSF

    if isinstance(HDUlist_or_filename, six.string_types):
        HDUlist = fits.open(HDUlist_or_filename)
    elif isinstance(HDUlist_or_filename, fits.HDUList):
        HDUlist = HDUlist_or_filename
    else:
        raise ValueError("input must be a filename or HDUlist")

    image = HDUlist[ext].data
    header = HDUlist[ext].header

    if image.ndim >= 3:  # handle datacubes gracefully
        image = image[slice, :, :]

    if center is None:
        # get exact center of image
        #center = (image.shape[1]/2, image.shape[0]/2)
        center = tuple((a - 1) / 2.0 for a in image.shape[::-1])

    # Compute a comparison image
    _log.info("Now computing image with zero OPD for comparison...")
    inst = Instrument(header['INSTRUME'])
    inst.filter = header['FILTER']
    inst.pupilopd = None  # perfect image
    inst.pixelscale = header['PIXELSCL'] * header[
        'OVERSAMP']  # same pixel scale pre-oversampling
    cache_key = (header['INSTRUME'], header['FILTER'], header['PIXELSCL'],
                 header['OVERSAMP'], header['FOV'], header['NWAVES'])
    try:
        comparison_psf = _Strehl_perfect_cache[cache_key]
    except KeyError:
        comparison_psf = inst.calcPSF(fov_arcsec=header['FOV'],
                                      oversample=header['OVERSAMP'],
                                      nlambda=header['NWAVES'])
        if cache_perfect: _Strehl_perfect_cache[cache_key] = comparison_psf

    comparison_image = comparison_psf[0].data

    if (int(center[1]) == center[1]) and (int(center[0]) == center[0]):
        # individual pixel
        meas_peak = image[int(center[1]), int(center[0])]
        ref_peak = comparison_image[int(center[1]), int(center[0])]
    else:
        # average across a group of 4; cast to int so the values can be
        # used as array indices
        bot = [int(np.floor(f)) for f in center]
        top = [int(np.ceil(f)) + 1 for f in center]
        meas_peak = image[bot[1]:top[1], bot[0]:top[0]].mean()
        ref_peak = comparison_image[bot[1]:top[1], bot[0]:top[0]].mean()
    strehl = (meas_peak / ref_peak)

    if display:
        plt.clf()
        plt.subplot(121)
        display_PSF(HDUlist, title="Observed PSF")
        plt.subplot(122)
        display_PSF(comparison_psf, title="Perfect PSF")
        plt.gcf().suptitle("Strehl ratio = %.3f" % strehl)

    if verbose:
        print("Measured peak:  {0:.3g}".format(meas_peak))
        print("Reference peak: {0:.3g}".format(ref_peak))
        print("  Strehl ratio: {0:.3f}".format(strehl))

    return strehl
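
# A minimal usage sketch ('psf_sim.fits' is a hypothetical WebbPSF output
# containing the header keywords this routine expects):
strehl = measure_strehl('psf_sim.fits', ext=0, display=False, verbose=True)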
Exemplo n.º 55
0
def measure_image(p_name,
                  images,
                  wcs_dict,
                  discovery=False,
                  target=DS9_NAME,
                  zpt=26.9):
    """

    :param p_name: provisional name of the source being measured
    :param images: list of images being measured.  Expect that image[0] is in ds9 Frame 1
    :param wcs_dict: dictionary of WCS objects, one for each image. wcs_dict[images[0]] is the WCS objet for image in frame 1.
    :param discovery: Is this a discovery image (add a '*' to the MPC record)
    :param target: Name of the DS9 session, set by xpa (i.e. validate)
    :param zpt: zeropoint of the frame.
    :return: an mpc_ephem Observation record.
    :rtype: list(ObsRecord)

    """
    ds9 = get_ds9(target)
    # Build a map of allowed key strokes
    allowed_keys = {
        'x': ('', 'centroid at this location'),
        'q': ('', 'Quit this image set'),
        'p': ('', 'Previous frame'),
        'n': ('', 'Next frame'),
        'r': ('', 'Create a NULL observation.')
    }

    for key in [x.split() for x in config.read("MPC.NOTE1OPTIONS")]:
        allowed_keys[key[0].lower()] = key

    obs = {}
    while True:
        try:
            result = ds9.get('iexam key coordinate image')
            key, x, y = result.split()
            x = float(x)
            y = float(y)
            logging.debug(f"DS9 Returned: {result} -> {key} {x} {y}")
        except Exception as ex:
            logging.debug(f"DS9 get exception: {ex}")
            break

        if key == 'n':
            ds9.set('frame next')
            continue
        if key == 'p':
            ds9.set('frame prev')
            continue
        if key == 'q':
            break
        if key == 'Q':
            raise SystemExit("Q hit")

        if key not in allowed_keys:
            logging.info("Allowed keys:")
            for allowed_key in allowed_keys:
                print(f"{allowed_key} -> {allowed_keys[allowed_key][1]}")
            continue

        note1 = allowed_keys[key][0]
        frame_no = int(ds9.get('frame')) - 1
        image = images[frame_no]
        ds9.set('regions', f'image; circle {x} {y} 5')
        centroid = not note1 == 'H'
        phot = daophot.phot_mag(image, [x], [y],
                                aperture=5,
                                sky_inner_radius=15,
                                sky_annulus_width=10,
                                apcor=0.3,
                                zmag=zpt,
                                maxcount=1000,
                                extno=1,
                                exptime=90.0,
                                centroid=centroid)

        phot_failure = (phot['PIER'][0] != 0 or phot.mask[0]['MAG']
                        or phot.mask[0]['MERR'])
        sky_failure = phot['SIER'][0] != 0
        cen_failure = phot['CIER'][0] != 0

        if phot_failure or sky_failure or cen_failure:
            logging.warning(f"iraf.daophot.phot error:\n {phot}")
            cen_x = x
            cen_y = y
            obs_mag = None
            obs_mag_err = None
            note1 = "H"
        else:
            cen_x = phot['XCENTER'][0]
            cen_y = phot['YCENTER'][0]
            obs_mag = phot['MAG'][0]
            obs_mag_err = phot['MERR'][0]

        colour = "{blue}"
        ds9.set('regions', f'image; circle {cen_x} {cen_y} 4 # color={colour}')

        obsdate = Time(Time(fits.open(image)[0].header['DATE-AVG'],
                            scale='tai').utc,
                       format='mjd',
                       precision=6).mpc

        ra = dec = "UNDEFINED"
        try:
            ra, dec = wcs_dict[image].all_pix2world(cen_x, cen_y, 1)
        except Exception as ex:
            logging.warning(
                f"Failure converting {cen_x, cen_y} to RA/DEC for {image}")
            logging.warning(ex)
            logging.warning(f"Got: {ra},{dec}")

        record_key = obsdate[0:13]
        null_obs = key in ['r', 'b']
        obs[record_key] = Observation(
            null_observation=null_obs,
            provisional_name=p_name,
            comment="{} {} {}".format(*util.from_provisional_name(p_name)),
            note1=note1,
            note2='C',
            date=obsdate,
            ra=ra,
            dec=dec,
            mag=obs_mag,
            mag_err=obs_mag_err,
            observatory_code='568',
            discovery=discovery,
            xpos=x,
            ypos=y,
            frame=os.path.splitext(os.path.basename(image))[0])

    return obs
Exemplo n.º 56
0
def get_CHMap_events(map_path):
    '''Return all the SPOCA_CoronalHole, SPOCA_CoronalHoleDetection,  SPOCA_CoronalHoleStatistics and SPOCA_CoronalHoleRun events in a SPoCA CHMap'''
    # Open the FITS file
    hdus = fits.open(map_path)

    # Create a sunpy Map for converting the pixel coordinates
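    # (image_hdu_name, region_hdu_name, chaincode_hdu_name and
    # region_stats_hdu_names are assumed to be module-level constants that
    # name the HDUs of the SPoCA CHMap file)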
    image_hdu = hdus[image_hdu_name]
    map = Map(image_hdu.data, image_hdu.header)

    # Get the regions by id
    regions_hdu = hdus[region_hdu_name]
    regions = {region['ID']: region for region in regions_hdu.data}

    # Get chaincodes by id
    chaincodes_hdu = hdus[chaincode_hdu_name]
    chaincodes = {
        id:
        (chaincodes_hdu.data['X%07d' % id], chaincodes_hdu.data['Y%07d' % id])
        for id in regions_hdu.data['ID']
    }

    # Get region stats by channel then by id
    region_stats = {
        hdus[region_stats_hdu_name].header['CHANNEL']: {
            region_stat['ID']: region_stat
            for region_stat in hdus[region_stats_hdu_name].data
        }
        for region_stats_hdu_name in region_stats_hdu_names
        if region_stats_hdu_name in hdus
    }

    # Create the CH events
    CH_events = list()

    # Keep the list of detections for the SPOCA_CoronalHoleRun event
    detection_names = list()

    for id, region in regions.items():

        events = dict()

        spoca_coronal_hole_detection_name = 'SPOCA_CoronalHoleDetection_{date}_{id}'.format(
            date=region['DATE_OBS'], id=id)
        events[
            'spoca_coronal_hole_detection'] = get_spoca_coronal_hole_detection(
                map,
                region,
                next(iter(region_stats.values()))[id],
                chaincodes[id],
                name=spoca_coronal_hole_detection_name)

        spoca_coronal_hole_name = 'SPOCA_CoronalHole_{color}'.format(
            color=region['TRACKED_COLOR'])
        events['spoca_coronal_hole'] = get_spoca_coronal_hole(
            spoca_coronal_hole_detection_name,
            region['DATE_OBS'] + 'Z',
            name=spoca_coronal_hole_name)

        events['spoca_coronal_hole_statistics'] = list()
        for channel, stats in region_stats.items():
            spoca_coronal_hole_statistics_name = 'SPOCA_CoronalHoleStatistics_{date}_{id}_{channel}'.format(
                date=region['DATE_OBS'], id=id, channel=channel)
            events['spoca_coronal_hole_statistics'].append(
                get_spoca_coronal_hole_statistics(
                    spoca_coronal_hole_detection_name,
                    channel,
                    stats[id],
                    name=spoca_coronal_hole_statistics_name))

        CH_events.append(events)

        detection_names.append(spoca_coronal_hole_detection_name)

    # Create the run event
    image_date = image_hdu.header['DATE_OBS'].split('.')[0]  # drop the subsecond part
    spoca_coronal_hole_run_name = 'SPOCA_CoronalHoleRun_{date}'.format(
        date=image_date)
    run_event = get_spoca_coronal_hole_run(image_date + 'Z',
                                           detection_names,
                                           name=spoca_coronal_hole_run_name)

    return run_event, CH_events
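A hedged usage sketch for the function above: it assumes the module-level HDU-name globals (image_hdu_name, region_hdu_name, chaincode_hdu_name, region_stats_hdu_names) and the get_spoca_* helpers are already defined, and the file path is hypothetical:

# Hypothetical SPoCA CHMap file; HDU-name globals must be set at module level.
run_event, ch_events = get_CHMap_events('20200522_120000.CHMap.fits')

print('Run event:', run_event)
for events in ch_events:
    # Each entry bundles the detection, the tracked hole, and the
    # per-channel statistics events for one region.
    print(sorted(events.keys()))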
Exemplo n.º 57
0
def add_fffits_metadata(ff_filename, config, platepars_recalibrated,
                        fallback_platepar):
    """
    Add FITS metadata and WCS to FF files generated by RMS

    Args:
        ff_filename (str): full or relative path to FF file
        config (RMS.Config): config instance
        platepars_recalibrated (dict): dictionary with recalibrated platepars
        fallback_platepar (RMS.Platepar): platepar with fitted stars

    Returns:
        None
    """
    ff_basename = os.path.basename(ff_filename)
    platepar_recalibrated = Platepar()
    try:
        platepar_data = platepars_recalibrated[ff_basename]
        with open("platepar_tmp.cal", "w") as f:
            json.dump(platepar_data, f)
        platepar_recalibrated.read("platepar_tmp.cal")
    except (FileNotFoundError, KeyError):
        platepar_recalibrated = fallback_platepar
        logger.warning(f"Using non-recalibrated platepar for {ff_basename}")

    fftime = getMiddleTimeFF(ff_basename, config.fps)

    fit_xy = np.array(fallback_platepar.star_list)[:, 1:3]

    _, fit_ra, fit_dec, _ = xyToRaDecPP([fftime] * len(fit_xy),
                                        fit_xy[:, 0],
                                        fit_xy[:, 1], [1] * len(fit_xy),
                                        platepar_recalibrated,
                                        extinction_correction=False)

    x0 = platepar_recalibrated.X_res / 2
    y0 = platepar_recalibrated.Y_res / 2
    _, ra0, dec0, _ = xyToRaDecPP([fftime], [x0], [y0], [1],
                                  platepar_recalibrated,
                                  extinction_correction=False)
    w = fit_wcs(fit_xy[:, 0],
                fit_xy[:, 1],
                fit_ra,
                fit_dec,
                x0,
                y0,
                ra0[0],
                dec0[0],
                5,
                projection="ZEA")

    hdu_list = fits.open(ff_filename, scale_back=True)
    obstime = Time(filenameToDatetime(ff_basename))

    header_meta = {}
    header_meta["OBSERVER"] = config.stationID.strip()
    header_meta["INSTRUME"] = "Global Meteor Network"
    header_meta["MJD-OBS"] = obstime.mjd
    header_meta["DATE-OBS"] = obstime.fits
    header_meta["NFRAMES"] = 256
    header_meta["EXPTIME"] = 256 / config.fps
    header_meta["SITELONG"] = round(config.longitude, 2)
    header_meta["SITELAT"] = round(config.latitude, 2)

    for hdu in hdu_list:
        if hdu.header["NAXIS"] == 0:  # primary HDU is not an image, so no WCS
            new_header = Header()
        else:
            new_header = w.to_fits(relax=True)[0].header

        for key, value in header_meta.items():
            new_header.append((key, value))

        for key, value in new_header.items():
            if key in hdu.header:
                continue
            hdu.header[key] = value

    hdu_list.writeto(ff_filename, overwrite=True)
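The per-HDU loop above merges the new keywords without clobbering anything already present in the header. That merge pattern in isolation, as a sketch with toy values:

from astropy.io import fits

hdu = fits.PrimaryHDU()
hdu.header['OBSERVER'] = 'XX0001'

new_keys = {'OBSERVER': 'YY0002', 'MJD-OBS': 59000.5}
for key, value in new_keys.items():
    if key in hdu.header:       # existing values win, as in the loop above
        continue
    hdu.header[key] = value

print(hdu.header['OBSERVER'])   # 'XX0001' -- left untouched
print(hdu.header['MJD-OBS'])    # 59000.5  -- appended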
Exemplo n.º 58
0
def main(orbit=None, **kwargs):
    """
    This is the driver program. Gets the images from VOSpace or the local filesystem.

    Expected kwargs:
    pointing, index, chip, rate, angle, ra, dec, p_name, discovery, nstk, zpt, dbimages

    :param kwargs: the 'args' sent on the command line or in an input file.
    :param orbit: the orbit of the object that should be in the frames referenced by the kwargs provided.
    :type orbit: BKOrbit
    :return: list of observations
    :rtype: list(ObsRecord)
    """

    pointing = kwargs['pointing']
    chip = kwargs['chip']
    index = kwargs['index']
    rate = kwargs['rate']
    angle = kwargs['angle']
    ra = kwargs['ra']
    dec = kwargs['dec']
    p_name = kwargs['p_name']
    discovery = kwargs['discovery']
    nstk = kwargs['nstk']
    rejected = kwargs.get('rejected', False)
    zpt = kwargs.get('zpt', 26.9)
    dbimages = kwargs.get('dbimages', 'vos:NewHorizons/dbimages/')

    client = Client()

    int_rate = int(rate * 10)
    int_angle = int((angle % 360) * 10)
    images = []
    # Load the nstk images associated with this pointing/chip/rate/angle set.
    epoch = None
    for idx in range(nstk):
        expnum = f'{int(pointing)}{int_rate:02d}{int_angle:04d}{idx}'
        image = f'{expnum}p{chip:02d}.fits'
        url = f'{dbimages}/{pointing:05d}/{chip:03d}/{index:04d}/{image}'
        logging.info(f"Looking for image at {url}")
        try:
            if os.access(url, os.R_OK):
                image = url

            elif not os.access(image, os.R_OK):
                # get from VOSpace if not already on disk
                client.copy(url, image)
        except Exception as ex:
            logging.error(str(ex))
            # Return empty set on VOSpace copy error.
            return {}
        images.append(image)
    epoch = Time(fits.open(images[len(images) // 2])[0].header['DATE-AVG'],
                 scale='tai').utc

    regions = f'{dbimages}/{pointing:05d}.reg'
    try:
        if not os.access(regions, os.R_OK):
            regions = client.copy(regions, '.', disposition=True)
    except Exception as ex:
        logging.debug(f"{ex}")
        regions = None

    wcs_dict = {}
    if orbit is not None:
        epoch = orbit.epoch.mjd
    if epoch is None:
        epoch = Time("2020-05-22T00:00:00")

    epoch = Time(kwargs.get('epoch', epoch), format='mjd')

    load_images(images,
                ra,
                dec,
                wcs_dict,
                orbit,
                dra=rate * math.cos(math.radians(angle)),
                ddec=rate * math.sin(math.radians(angle)),
                regions=regions,
                rejected=rejected,
                basedate=epoch)

    obs = measure_image(p_name, images, wcs_dict, discovery=discovery, zpt=zpt)
    return obs
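The epoch handling above hinges on astropy Time scale conversion: DATE-AVG is recorded on the TAI scale and must be shifted to UTC before use. A self-contained sketch with a made-up timestamp:

from astropy.time import Time

# Hypothetical DATE-AVG value; the real one comes from the middle image's header.
date_avg = '2020-05-22T12:34:56.789'
epoch = Time(date_avg, scale='tai').utc

# TAI leads UTC by 37 s in 2020, so the UTC instant is 37 s earlier.
print(epoch.isot, epoch.mjd)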
Exemplo n.º 59
0
def diag_t(eventfile, par_list, tbin_size, t1, t2, mode, diag_vars):
    """
    Get the diagnostic plots for a desired time interval.

    eventfile - path to the event file. Will extract ObsID from this for the NICER files.
    par_list - A list of parameters we'd like to extract from the FITS file
    (e.g., from eventcl: PI_FAST, TIME, PI)
    tbin_size - the size of the time bins (in seconds!)
    >> e.g., tbin_size = 2 means bin by 2s
    >> e.g., tbin_size = 0.05 means bin by 0.05s!
    t1 - lower time boundary
    t2 - upper time boundary
    mode - whether we want to show or save the plot.
    diag_vars - a dictionary where each key = 'att','mkf','hk', or 'cl', and
    diag_vars[key] provides the list of variables to loop over.
    """
    if not isinstance(tbin_size, (int, float)):
        raise TypeError("tbin_size should be a float or integer!")
    if 'PI' not in par_list or 'TIME' not in par_list:
        raise ValueError(
            "You should have BOTH 'PI' and 'TIME' in the parameter list!")
    if not isinstance(par_list, (list, np.ndarray)):
        raise TypeError("par_list should either be a list or an array!")
    if mode != 'show' and mode != 'save':
        raise ValueError("Mode should either be 'show' or 'save'!")

    parent_folder = str(pathlib.Path(eventfile).parent)
    event_header = fits.open(eventfile)[1].header
    obj_name = event_header['OBJECT']
    obsid = event_header['OBS_ID']

    #get the binned light curve
    binned_t, binned_counts = Lv1_data_bin.binning_t(eventfile, par_list,
                                                     tbin_size, t1, t2)

    #define the variables that we'd like to compare their behavior with the light curve
    att_var = diag_vars['att']
    mkf_var = diag_vars['mkf']
    hk_var = diag_vars['hk']

    ### FOR ATTITUDE
    dict_att = Lv0_nicer_housekeeping.get_att(eventfile, att_var)
    times_att = dict_att['TIME']
    shifted_t_att = times_att - times_att[0]
    filtered_t = shifted_t_att[(shifted_t_att >= t1) & (shifted_t_att <= t2)]
    for i in range(1, len(att_var)):  #as in, don't compare time with time...
        filtered_att = dict_att[att_var[i]][(shifted_t_att >= t1)
                                            & (shifted_t_att <= t2)]
        if len(filtered_t) != len(filtered_att):
            raise ValueError(
                "The lengths of arrays filtered t and filtered att for variable "
                + str(att_var[i]) + ' are different, with ' +
                str(len(filtered_t)) + ' and ' + str(len(filtered_att)) +
                ' respectively.')

        if mode == 'show':
            Lv3_diagnostics_display.display_t(eventfile, att_var[i], t1, t2,
                                              binned_t, binned_counts,
                                              filtered_t, filtered_att, '.att')
            plt.show()

    if mode == 'save':
        filename = parent_folder + '/diag_att_' + obsid + '_bin' + str(
            tbin_size) + 's_' + str(t1) + 's-' + str(t2) + 's.pdf'
        with PdfPages(filename) as pdf:
            for i in range(1, len(att_var)):
                filtered_att = dict_att[att_var[i]][(shifted_t_att >= t1)
                                                    & (shifted_t_att <= t2)]
                Lv3_diagnostics_display.display_t(eventfile, att_var[i], t1,
                                                  t2, binned_t, binned_counts,
                                                  filtered_t, filtered_att,
                                                  '.att')
                pdf.savefig()
                plt.close()

    ### FOR FILTER
    dict_mkf = Lv0_nicer_housekeeping.get_mkf(eventfile, mkf_var)
    times_mkf = dict_mkf['TIME']
    shifted_t_mkf = times_mkf - times_mkf[0]
    filtered_t = shifted_t_mkf[(shifted_t_mkf >= t1) & (shifted_t_mkf <= t2)]
    for i in range(1, len(mkf_var)):  #as in, don't compare time with time...
        filtered_mkf = dict_mkf[mkf_var[i]][(shifted_t_mkf >= t1)
                                            & (shifted_t_mkf <= t2)]
        if len(filtered_t) != len(filtered_mkf):
            raise ValueError(
                "The lengths of arrays filtered t and filtered mkf for variable "
                + str(mkf_var[i]) + ' are different, with ' +
                str(len(filtered_t)) + ' and ' + str(len(filtered_mkf)) +
                ' respectively.')

        if mode == 'show':
            Lv3_diagnostics_display.display_t(eventfile, mkf_var[i], t1, t2,
                                              binned_t, binned_counts,
                                              filtered_t, filtered_mkf, '.mkf')
            plt.show()

    if mode == 'save':
        filename = parent_folder + '/diag_mkf_' + obsid + '_bin' + str(
            tbin_size) + 's_' + str(t1) + 's-' + str(t2) + 's.pdf'
        with PdfPages(filename) as pdf:
            for i in range(1, len(mkf_var)):
                filtered_mkf = dict_mkf[mkf_var[i]][(shifted_t_mkf >= t1)
                                                    & (shifted_t_mkf <= t2)]
                Lv3_diagnostics_display.display_t(eventfile, mkf_var[i], t1,
                                                  t2, binned_t, binned_counts,
                                                  filtered_t, filtered_mkf,
                                                  '.mkf')
                pdf.savefig()
                plt.close()

    ### FOR HK
    if mode == 'show':
        for i in range(7):
            dict_hk = Lv0_nicer_housekeeping.get_hk(eventfile, str(i), hk_var)
            times_hk = dict_hk['TIME']
            shifted_t_hk = times_hk - times_hk[0]
            filtered_t = shifted_t_hk[(shifted_t_hk >= t1)
                                      & (shifted_t_hk <= t2)]
            for j in range(
                    1, len(hk_var)):  #as in, don't compare time with time...
                filtered_hk = dict_hk[hk_var[j]][(shifted_t_hk >= t1)
                                                 & (shifted_t_hk <= t2)]
                if len(filtered_t) != len(filtered_hk):
                    raise ValueError(
                        "The lengths of arrays filtered t and filtered hk for variable "
                        + str(hk_var[j]) + ' are different, with ' +
                        str(len(filtered_t)) + ' and ' +
                        str(len(filtered_hk)) +
                        ' respectively. This is for HK MPU=' + str(i))
                Lv3_diagnostics_display.display_t(eventfile, hk_var[j], t1, t2,
                                                  binned_t, binned_counts,
                                                  filtered_t, filtered_hk,
                                                  ['.hk', str(i)])
                plt.show()

    if mode == 'save':
        filename = parent_folder + '/diag_hk_' + obsid + '_bin' + str(
            tbin_size) + 's_' + str(t1) + 's-' + str(t2) + 's.pdf'
        with PdfPages(filename) as pdf:
            for i in range(7):
                dict_hk = Lv0_nicer_housekeeping.get_hk(
                    eventfile, str(i), hk_var)
                times_hk = dict_hk['TIME']
                shifted_t_hk = times_hk - times_hk[0]
                filtered_t = shifted_t_hk[(shifted_t_hk >= t1)
                                          & (shifted_t_hk <= t2)]
                for j in range(
                        1,
                        len(hk_var)):  #as in, don't compare time with time...
                    filtered_hk = dict_hk[hk_var[j]][(shifted_t_hk >= t1)
                                                     & (shifted_t_hk <= t2)]
                    if len(filtered_t) != len(filtered_hk):
                        raise ValueError(
                            "The lengths of arrays filtered t and filtered hk for variable "
                            + str(hk_var[j]) + ' are different, with ' +
                            str(len(filtered_t)) + ' and ' +
                            str(len(filtered_hk)) +
                            ' respectively. This is for HK MPU=' + str(i))
                    Lv3_diagnostics_display.display_t(eventfile, hk_var[j], t1,
                                                      t2, binned_t,
                                                      binned_counts,
                                                      filtered_t, filtered_hk,
                                                      ['.hk', str(i)])
                    pdf.savefig()
                    plt.close()

    ### FOR EVENT_CL (BARY)
    data_dict = Lv0_fits2dict.fits2dict(eventfile, 1, par_list)
    times_cl = data_dict['TIME']
    shifted_t_cl = times_cl - times_cl[0]
    filtered_t = shifted_t_cl[(shifted_t_cl >= t1) & (shifted_t_cl <= t2)]
    for i in range(1, len(par_list)):  #as in, don't compare time with time...
        filtered_cl = data_dict[par_list[i]][(shifted_t_cl >= t1)
                                             & (shifted_t_cl <= t2)]
        if len(filtered_t) != len(filtered_cl):
            raise ValueError(
                "The lengths of arrays filtered t and filtered cl for variable "
                + str(par_list[i]) + ' are different, with ' +
                str(len(filtered_t)) + ' and ' + str(len(filtered_cl)) +
                ' respectively.')

        if mode == 'show':
            Lv3_diagnostics_display.display_t(eventfile, par_list[i], t1, t2,
                                              binned_t, binned_counts,
                                              filtered_t, filtered_cl, '.cl')
            plt.show()

    if mode == 'save':
        filename = parent_folder + '/diag_cl_' + obsid + '_bin' + str(
            tbin_size) + 's_' + str(t1) + 's-' + str(t2) + 's.pdf'
        with PdfPages(filename) as pdf:
            for i in range(1, len(par_list)):
                filtered_cl = data_dict[par_list[i]][(shifted_t_cl >= t1)
                                                     & (shifted_t_cl <= t2)]
                Lv3_diagnostics_display.display_t(eventfile, par_list[i], t1,
                                                  t2, binned_t, binned_counts,
                                                  filtered_t, filtered_cl,
                                                  '.cl')
                pdf.savefig()
                plt.close()
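diag_t applies the same shift-and-mask pattern to every data source (.att, .mkf, .hk, and event_cl). A small helper capturing that pattern, shown as a sketch with toy data:

import numpy as np

def filter_interval(times, values, t1, t2):
    """Shift times to start at zero and keep samples with t1 <= t <= t2."""
    shifted = times - times[0]
    mask = (shifted >= t1) & (shifted <= t2)
    return shifted[mask], values[mask]

t = np.arange(0.0, 100.0, 0.5) + 1000.0   # toy MET-like timestamps
v = np.random.default_rng(0).normal(size=t.size)
ft, fv = filter_interval(t, v, 10.0, 20.0)
assert ft.size == fv.size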
Exemplo n.º 60
0
path = '/Users/baotong/Desktop/period_gc/'
p90_list = 'mod_expweighted_90_mean_i4.psfmap'
srcname_list = path + 'combineobs_info_box.txt'

src_info = np.loadtxt(srcname_list)
phy_x = src_info[:, 2]
phy_y = src_info[:, 3]

phy_x = np.rint(phy_x)
phy_y = np.rint(phy_y)
phy_x_int = phy_x.astype(int)  # np.int was removed from numpy; use the builtin int
phy_y_int = phy_y.astype(int)

src_x = phy_x_int - 2896
src_y = phy_y_int - 2896
# Convert physical coordinates to image coordinates
hdul_p90 = fits.open(path + p90_list)

p90_data = hdul_p90[0].data
p90_data = p90_data.T
src_radius = p90_data[src_x, src_y]
src_radius *= 2.03252
# The p90 psfmap values are in arcsec by default; the pixel size is 0.492 arcsec,
# so multiplying by 1/0.492 = 2.03252 converts the radii to pixels.
print(np.sort(src_radius))
for i in range(len(phy_x)):
    with open(path + 'region/{0}.reg'.format(i + 1), 'w+') as f1:
        reg = 'circle(' + str(phy_x[i]) + ',' + str(phy_y[i]) + ',' + str(
            src_radius[i]) + ')'
        f1.writelines(reg)
    with open(path + 'region/all.reg', 'a+') as f2: