Code example #1
File: spector.py  Project: aileisun/bubblepy
	def make_linefrac(self, band, lines=['NeIII3870', 'NeIII3969', 'Hg', 'Hb', 'OIII4960', 'OIII5008', 'OI6302', 'OI6366'], tofixOIIIratio=True, overwrite=False):
		"""
		make file spec_linefrac.csv that contains the fraction each of the strong lines contributes to a specific band.
		Columns: lineband, f_{line}, w_{line}, t_{line}, fwt_{line}, frac_{line}

		The fraction is based on the f*w*T of the line (flux times observed wavelength times normalized filter transmission). Only the strong lines are listed. 

		If tofixOIIIratio is True, then the ratio between OIII5008 and OIII4960 is fixed to the theoretical ratio of 2.98, see 
		Storey & Zeippen (2000), http://adsabs.harvard.edu/abs/2000MNRAS.312..813S. 

		Params
		------
		band
		lines=['NeIII3870', 'NeIII3969', 'Hg', 'Hb', 'OIII4960', 'OIII5008', 'OI6302', 'OI6366']
		tofixOIIIratio=True
		overwrite=False

		Return
		------
		status
		"""
		fn = self.fp_spec_linefrac

		self.make_lineflux(overwrite=overwrite)

		if not os.path.isfile(fn) or overwrite:
			print("[spector] making spec_linefrac")

			tab = at.Table([[band]], names=['lineband'])

			fwt_sum = 0.
			for line in lines:
				f, __ = self._get_line_flux(line=line, wunit=False)
				w = self._get_line_obs_wave(line=line, wunit=False)
				T = self._get_norm_trans(wavelength=w, band=band, bounds_error=False)

				fwt = max(f*w*T, 0)
				fwt_sum = fwt_sum + fwt

				col_new = at.Table([[f], [w], [T], [fwt]], names=['f_{}'.format(line), 'w_{}'.format(line), 't_{}'.format(line), 'fwt_{}'.format(line)])
				tab = at.hstack([tab, col_new])

			for line in lines:
				frac = tab['fwt_{}'.format(line)][0] / fwt_sum
				col_new = at.Table([[frac]], names=['frac_{}'.format(line)])
				tab = at.hstack([tab, col_new])

			if tofixOIIIratio:
				r = 2.98
				frac_OIIItotal = tab['frac_OIII4960'] + tab['frac_OIII5008']
				tab['frac_OIII5008'] = frac_OIIItotal * r / (1.+r)
				tab['frac_OIII4960'] = frac_OIIItotal * 1. / (1.+r)

			tab.write(fn, format='ascii.csv', overwrite=overwrite)

		else:
			print("[spector] skip making spec_linefrac as file exists")

		status = os.path.isfile(fn)
		return status 
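The tofixOIIIratio step above only redistributes the two [O III] fractions while preserving their sum. A standalone sketch of that arithmetic (a minimal illustration, not part of the original class):

# Redistribute the [O III] doublet fractions at the fixed theoretical ratio
# r = frac_OIII5008 / frac_OIII4960 = 2.98 (Storey & Zeippen 2000),
# keeping their sum unchanged.
def fix_oiii_ratio(frac_4960, frac_5008, r=2.98):
    total = frac_4960 + frac_5008
    return total * 1. / (1. + r), total * r / (1. + r)

print(fix_oiii_ratio(0.2, 0.5))  # -> (0.1758..., 0.5241...), sum still 0.7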
Code example #2
File: test_mixin.py  Project: Cadair/astropy
def test_hstack(table_types):
    """
    Hstack tables with mixin cols.  Use column "i" as proxy for what the
    result should be for each mixin.
    """
    t1 = table_types.Table()
    t1['i'] = table_types.Column([0, 1, 2, 3])
    for name, col in MIXIN_COLS.items():
        t1[name] = col
        t1[name].info.description = name
        t1[name].info.meta = {'a': 1}

    for join_type in ('inner', 'outer'):
        for chop in (True, False):
            t2 = table_types.Table(t1)
            if chop:
                t2 = t2[:-1]
                if join_type == 'outer':
                    with pytest.raises(NotImplementedError) as exc:
                        t12 = hstack([t1, t2], join_type=join_type)
                    assert 'hstack requires masking column' in str(exc.value)
                    continue

            t12 = hstack([t1, t2], join_type=join_type)
            idx1 = t12['i_1']
            idx2 = t12['i_2']
            for name, col in MIXIN_COLS.items():
                name1 = name + '_1'
                name2 = name + '_2'
                assert_table_name_col_equal(t12, name1, col[idx1])
                assert_table_name_col_equal(t12, name2, col[idx2])
                for attr in ('description', 'meta'):
                    assert getattr(t1[name].info, attr) == getattr(t12[name1].info, attr)
                    assert getattr(t2[name].info, attr) == getattr(t12[name2].info, attr)
Code example #3
File: compare_bands.py  Project: richardgmcmahon/DES
def des_joinbands(tile=None, release=None, suffix='r2587p01'):

        infile = '/data/desardata/'+release+'/'+tile+'/'+tile+'_g_cat.fits'
        if release == 'Y3A1':
            infile = '/data/desardata/' + release + '/' + tile + '/' + tile+ '_' + suffix + '_g_cat.fits'
        g = Table.read(infile)

        infile = '/data/desardata/'+release+'/'+tile+'/'+tile+'_r_cat.fits'
        if release == 'Y3A1':
            infile = '/data/desardata/' + release + '/' + tile + '/' + tile+ '_' + suffix + '_r_cat.fits'
        r = Table.read(infile)

        infile = '/data/desardata/'+release+'/'+tile+'/'+tile+'_i_cat.fits'
        if release == 'Y3A1':
            infile = '/data/desardata/' + release + '/' + tile + '/' + tile+ '_' + suffix + '_i_cat.fits'
        i = Table.read(infile)

        infile = '/data/desardata/'+release+'/'+tile+'/'+tile+'_z_cat.fits'
        if release == 'Y3A1':
            infile = '/data/desardata/' + release + '/' + tile + '/' + tile+ '_' + suffix + '_z_cat.fits'
        z = Table.read(infile)

        infile = '/data/desardata/'+release+'/'+tile+'/'+tile+'_Y_cat.fits'
        if release == 'Y3A1':
            infile = '/data/desardata/' + release + '/' + tile + '/' + tile+ '_' + suffix + '_Y_cat.fits'
        Y = Table.read(infile)

        # stack all the columns across the DES wavebands
        t = hstack([g,r,i,z,Y], table_names=['G','R','I','Z','Y'])

        t.info()

        t.write('/data/desardata/'+release+'/'+tile+'/'+tile+'_merged_cat.fits', overwrite=True)

        print(release, tile, 'done')
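The five per-band blocks in des_joinbands differ only in the band letter. A more compact, behavior-preserving variant could loop over the bands; a sketch assuming the same path layout and Y3A1 naming convention:

from astropy.table import Table, hstack

def des_joinbands_loop(tile, release, suffix='r2587p01'):
    tables = []
    for band in ('g', 'r', 'i', 'z', 'Y'):
        infile = '/data/desardata/' + release + '/' + tile + '/' + tile + '_' + band + '_cat.fits'
        if release == 'Y3A1':
            infile = ('/data/desardata/' + release + '/' + tile + '/' +
                      tile + '_' + suffix + '_' + band + '_cat.fits')
        tables.append(Table.read(infile))

    # stack all the columns across the DES wavebands
    t = hstack(tables, table_names=['G', 'R', 'I', 'Z', 'Y'])
    t.write('/data/desardata/' + release + '/' + tile + '/' + tile + '_merged_cat.fits',
            overwrite=True)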
Code example #4
File: zphot.py  Project: nicolaschotard/Clusters
    def save_zphot(self, file_out, path_output, overwrite=False):
        """Save the output of photoz code (z_best, chi^2, pdz) into astropy table."""
        # Duplicates the zbins vector for each object.
        # It is redundant information but astropy tables need each field to have the
        # same size. Or maybe I'm missing something.
        zbins = N.tile(self.pdz_zbins, (len(self.kwargs['id']), 1))

        # Converts LePhare or BPZ likelihood to actual probability density
        for i in N.arange(len(self.pdz_val.T)):
            norm = N.trapz(self.pdz_val[:, i], self.pdz_zbins)
            new_pdz_val = self.pdz_val[:, i] / norm
            self.pdz_val[:, i] = new_pdz_val

        # Creates astropy table to be saved in path_output of file_out
        new_tab = hstack([Table([self.kwargs['id']]), Table([self.kwargs['ra']]),
                          Table([self.kwargs['dec']]), Table(self.data_dict),
                          Table([zbins], names=['zbins']),
                          Table([self.pdz_val.T], names=['pdz'])],
                         join_type='inner')

        # Rename BPZ Z_B to Z_BEST to match LePhare
        if 'Z_B' in new_tab.keys():
            new_tab.rename_column('Z_B', 'Z_BEST')

        # The overwrite keyword of data.write(file, path) does not only
        # overwrite the data in path, but the whole file, i.e., we would lose
        # all other paths in the process --> overwrite_or_append (see above)

        cutils.overwrite_or_append(file_out, path_output, new_tab, overwrite=overwrite)

        print("INFO: ", self.code, "data saved in", file_out, "as", path_output)
Code example #5
def add_DEScat(tile, outdir, filename_suffix='_WISEfp',
               radius_match=5.0,
               overwrite=False):

    from astropy.table import Table, hstack
    import numpy as np
    import match_lists

    config = configparser.RawConfigParser()
    config.read(config_file)

    release = config.get("des", "release")

    infile = datapath + tile + "/" + tile + ".fits"

    logger.debug('Read: %s', infile)

    t_cat = Table.read(infile)
    t_fp = Table.read(outdir + tile + "_WISEfp.fits")

    dists, inds = match_lists.match_lists(
        t_cat["ALPHAWIN_J2000_G"].data, t_cat["DELTAWIN_J2000_G"].data,
        t_fp["RA_CALC_G"].data, t_fp["DEC_CALC_G"].data,
        radius_match / 3600.0, 1)

    ids = np.where((inds != len(t_fp)))[0]
    t_cat = t_cat[ids]
    t_fp = t_fp[inds[ids]]
    t_out = hstack([t_fp, t_cat])
    t_out.write(outdir + tile + "_WISEfp_DEScat.fits",
                overwrite=overwrite)
Code example #6
File: NeStarPar.py  Project: pacargile/NeStarPar
	def __init__(self, stripeindex=None):
		if stripeindex is None:
			BCfile = MISTFILE_default
		else:
			BCfile = '/n/regal/conroy_lab/pac/MISTFILES/MIST_full_{0}.h5'.format(stripeindex)

		# read in MIST hdf5 table
		MISTh5 = h5py.File(BCfile, 'r')
		# determine the BC datasets
		BCTableList = [x for x in MISTh5.keys() if x[:3] == 'BC_']
		# read in each BC dataset and pull the photometric information
		for BCT in BCTableList:
			BCTABLE = Table(np.array(MISTh5[BCT]))
			if BCT == BCTableList[0]:
				BC = BCTABLE.copy()
			else:
				BCTABLE.remove_columns(['Teff', 'logg', '[Fe/H]', 'Av', 'Rv'])
				BC = hstack([BC, BCTABLE])

		BC_AV0 = BC[BC['Av'] == 0.0]

		self.bands = BC.keys()
		[self.bands.remove(x) for x in ['Teff', 'logg', '[Fe/H]', 'Av', 'Rv']]

		self.redintr = LinearNDInterpolator(
			(BC['Teff'], BC['logg'], BC['[Fe/H]'], BC['Av']),
			np.stack([BC[bb] for bb in self.bands], axis=1),
			rescale=True
			)
		self.redintr_0 = LinearNDInterpolator(
			(BC_AV0['Teff'], BC_AV0['logg'], BC_AV0['[Fe/H]']),
			np.stack([BC_AV0[bb] for bb in self.bands], axis=1),
			rescale=True
			)
Code example #7
File: A4200obj.py  Project: PerihelionMind/Astro4200
 def calculate_fluxes(self,stardata,apertures,aper_annulus):
     '''
         Calculates the photon flux in an aperture, and an annulus
         around the aperture to subtract the background.
         
         As far as I can tell, the output value is still just a 'photon count',
         not technically a photon flux. Possible modifications would start here,
         e.g. uncommenting the apertures.area() line, which divides the aperture
         phot_count by the aperture area, giving photons per area.

         I think we would further have to divide by the exposure time, and some
         other wavelength factor I can't think of, to get PHOTON FLUX
         (photons per sec per cm^2 per wavelength).
     '''
     flux_table = aperture_photometry(stardata, apertures)
     bkg_table  = aperture_photometry(stardata, aper_annulus)
     
     phot_table = hstack([flux_table, bkg_table], table_names=['raw','bkg'])
     bkg_mean = phot_table['aperture_sum_bkg'] / aper_annulus.area()
     
     bkg_sum = bkg_mean * apertures.area()
     final_sum = phot_table['aperture_sum_raw'] - bkg_sum
     phot_table['residual_aperture_sum'] = final_sum
     #phot_table['res_aper_div_area'] = final_sum/apertures.area()
     #print(phot_table)
     return self.cut_vals(phot_table)
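A minimal sketch of the conversion the docstring reasons about, dividing the background-subtracted count by the aperture area and an exposure time. exptime is an assumed argument, not defined anywhere in the original class:

# Hypothetical follow-up to calculate_fluxes: turn the residual count into a
# rate per unit aperture area. 'exptime' (seconds) is an assumption here;
# apertures.area() matches the older photutils API used above.
def counts_to_rate(phot_table, apertures, exptime):
    rate = phot_table['residual_aperture_sum'] / apertures.area() / exptime
    phot_table['count_rate_per_area'] = rate  # counts / s / aperture-area unit
    return phot_table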
Code example #8
def catalog_match(pubcat_file, erscat_file, match_out_file, match_tol = 1.0):
    ''' matches combined NED/SIMBAD file to ERS source list
    '''
    pubcat = Table.read(pubcat_file, format = 'fits')
    erscat = Table.read(erscat_file, format='ascii.commented_header')

    # construct coordinates needed for matching
    pub_coo = SkyCoord(ra=pubcat['RA'], dec=pubcat['Dec'])
    ers_coo = SkyCoord(ra=erscat['ra']*u.degree, dec=erscat['dec']*u.degree) 

    # do the matching
#    closest_2to1, sep2d_2to1, sep3d = match_coordinates_sky(coord1, coord2) # location in coord2 for closest match to each coord1. len = len(coord1)
    closest, sep2d, sep3d = match_coordinates_sky(pub_coo, ers_coo) # location in coord2 for closest match to each coord1. len = len(coord1)
    matched  = sep2d < match_tol*u.arcsec
#    matched_ers, matched_pub, ers_only, pub_only = symmetric_match_sky_coords(ers_coo, pub_coo, match_tol*u.arcsec)

    # generate the matched table
    keeplist = ['id_','ra','dec']
    tmpcat = Table(erscat[keeplist])
    matchtab = hstack([tmpcat[closest][matched], pubcat[matched]], join_type = 'outer')

    # write the matched catalog to a file
    matchtab.write(match_out_file, format = 'ascii.commented_header')

    return
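The pattern above (nearest-neighbour match, separation cut, then hstack of the aligned rows) can be demonstrated on synthetic coordinates; a small self-contained sketch:

import astropy.units as u
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy.table import Table, hstack

cat1 = Table({'ra': [10.0, 20.0], 'dec': [0.0, 5.0]})
cat2 = Table({'ra': [10.0001, 50.0], 'dec': [0.0001, 5.0]})

c1 = SkyCoord(ra=cat1['ra'] * u.deg, dec=cat1['dec'] * u.deg)
c2 = SkyCoord(ra=cat2['ra'] * u.deg, dec=cat2['dec'] * u.deg)

closest, sep2d, _ = match_coordinates_sky(c1, c2)  # index into cat2 for each cat1 row
matched = sep2d < 1.0 * u.arcsec                   # keep pairs closer than 1 arcsec
pairs = hstack([cat1[matched], cat2[closest][matched]])
print(pairs)  # one row (the close pair); duplicate columns become ra_1, ra_2, ...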
Code example #9
File: isomeasurer.py  Project: aileisun/bubblepy
	def _get_tab_measurements_from_contours(self, contours, xc, yc):
		""" 
		calculate iso measurements from the contours and return them as a table
		"""

		tab = polytools.ShapeParamsTab_from_contours(contours, xc, yc)

		# unit conversion
		area_ars = tab['area_pix'][0]*(self.pixsize/u.arcsec)**2
		dmax_ars = self._pix_to_theta(tab['dmax_pix'][0], wunit=False)
		rmax_ars = self._pix_to_theta(tab['rmax_pix'][0], wunit=False)
		dper_ars = self._pix_to_theta(tab['dper_pix'][0], wunit=False)

		kpc_per_arcsec = np.array(self._get_kpc_proper_per_arcsec())

		area_kpc = area_ars * kpc_per_arcsec**2
		dmax_kpc = dmax_ars * kpc_per_arcsec
		rmax_kpc = rmax_ars * kpc_per_arcsec
		dper_kpc = dper_ars * kpc_per_arcsec

		tab_converted = at.Table(names=['area_kpc', 'dmax_kpc', 'rmax_kpc', 'dper_kpc', 'area_ars', 'dmax_ars', 'rmax_ars', 'dper_ars', ])
		tab_converted.add_row([area_kpc, dmax_kpc, rmax_kpc, dper_kpc, area_ars, dmax_ars, rmax_ars, dper_ars, ])

		tabout = at.hstack([tab_converted, tab])

		return tabout
Code example #10
def assign_num(args):
    """Assigns individual identification number to each object"""
    seed =122
    np.random.seed(seed)
    print "Assigning number"
    names = ('SEG_ID', 'NUMBER', 'IDENT')
    dtype = ('S2', 'int', 'int')
    index_table = Table(names=names, dtype = dtype)
    ident = 0
    #objects detected are same in all filters. So getting objects in first filter
    #is sufficient
    filt = args.filter_names[0]
    all_seg_ids = np.loadtxt(args.seg_list_file, delimiter=" ",dtype='S2')
    for seg_id in all_seg_ids:
        file_name = args.main_path + seg_id + '/' + filt + '_with_pstamp.fits'
        catalog = Table.read(file_name, format='fits')
        idents = range(ident,ident+len(catalog))
        seg_ids = [seg_id]*len(catalog)
        numbers = catalog['NUMBER']       
        temp = Table([seg_ids, numbers,idents],names=names, dtype = dtype)
        index_table = vstack([index_table,temp])
        ident+=len(catalog)
    shuffle_idents = list(range(len(index_table)))
    np.random.shuffle(shuffle_idents)
    index_table = index_table[shuffle_idents]
    order_idents = list(range(len(index_table)))
    file_nums = np.array(order_idents) // 1000 + 1
    hdus= np.zeros(len(order_idents))
    names = ('ORDER', 'FILE_NUM', 'HDU')
    dtype = ('int' ,'int', 'int')
    temp = Table([order_idents,file_nums,hdus], names=names, dtype=dtype)
    index_table = hstack([index_table,temp])
    cat_name = args.main_path + 'index_table_' + args.cat_name.replace('filter', '')
    return index_table
Code example #11
def join_cats(tile, outdir, overwrite=False):
    """

    """
    from astropy.table import Table, hstack

    logger = logging.getLogger()

    t_out = Table.read(outdir + tile + "_WISEfp_g_cal.fits")

    for col in t_out.columns:
        t_out.rename_column(col, col + "_G")

    for band in ["r", "i", "z", "Y"]:
        fp_file = outdir + tile + "_WISEfp_" + band + "_cal.fits"
        t = Table.read(fp_file)

        for col in t.columns:
            t.rename_column(col, col + "_" + band.upper())

        # stack onto end of the table
        t_out = hstack([t_out, t])

    outfile = outdir + tile + "_WISEfp.fits"
    # add a counter to the WISE list
    t_out["WISE_NUM"] = np.arange(1, len(t_out) + 1)
    t_out.write(outfile, overwrite=overwrite)

    logger.debug('Join complete: %s', outfile)
Code example #12
File: getzrange_batch.py  Project: aileisun/bubblepy
def append_tabdata_to_tabout(tabdata, tbandconfig, tabout):
    tabrow = at.hstack([tbandconfig, tabdata])

    if len(tabout)==0: 
        tabout=tabrow
    else: 
        tabout=at.vstack([tabout,tabrow])
    return tabout
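A short usage sketch of this accumulate-rows helper (the column names are illustrative only):

import astropy.table as at

tabout = at.Table()
for zmin in (0.1, 0.2):
    tbandconfig = at.Table([[zmin]], names=['zmin'])     # per-band configuration
    tabdata = at.Table([[zmin + 0.05]], names=['zmax'])  # computed result
    tabout = append_tabdata_to_tabout(tabdata, tbandconfig, tabout)
# tabout now holds one row per band configuration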
Code example #13
def add_kernel_DEScat(tile, outdir, kernel_path, suffix_for_kernelcats):

	from astropy.table import Table, hstack
	import numpy as np
	import match_lists
	import astropy.io.fits as fits
	import subprocess
	import os
	import configparser
	config = configparser.RawConfigParser()
	config.read(config_file)

	tile_header = fits.open('/data/desardata/Y1A1/'+str(tile)+'/'+str(tile)+'_g.fits.fz')[1].header
	RA_centre = tile_header['CRVAL1']
	DEC_centre = tile_header['CRVAL2']
	colours = ["g","r","i","z","Y"]
	for colour in colours:
		if not os.path.exists('/data/desardata/Y1A1/'+str(tile)+'/'+str(tile)+'_'+str(colour)+'.fits'):
			subprocess.call(['bash','/data/cl522/WISE2DES/kernelised_extraction/funpack.sh','/data/desardata/Y1A1/'+str(tile)+'/'+str(tile)+'_'+str(colour)+'.fits.fz'])

	subprocess.call(['bash','/data/cl522/WISE2DES/kernelised_extraction/sextractor_script.sh',str(tile),str(RA_centre),str(DEC_centre),str(kernel_path),str(suffix_for_kernelcats),str(outdir)])

	for (n, band) in enumerate(["G", "R", "I", "Z"]): #, "Y"]): removed because final fits file must have < 1000 columns :/
		if n!=4:
			t = Table.read(outdir+"/"+tile+"_"+band.lower()+"_cat"+suffix_for_kernelcats+".fits")
		if n==4:
			t = Table.read(outdir+"/"+tile+"_"+band+"_cat"+suffix_for_kernelcats+".fits")
		for col in t.columns:
			t.rename_column(col, col + "_" + band)
		if n == 0:
			t_cat = t
		else:
			t_cat = hstack([t_cat, t])
	t_cat.write(outdir+"/"+tile+suffix_for_kernelcats+".fits", overwrite = True)

	release = config.get("des", "release")
	t_cat = Table.read(outdir+"/"+tile+suffix_for_kernelcats+".fits")
	t_fp = Table.read(outdir + tile + "_WISEfp.fits")

	dists, inds = match_lists.match_lists(t_cat["ALPHAWIN_J2000_G"].data, t_cat["DELTAWIN_J2000_G"].data, t_fp["RA_CALC_G"].data, t_fp["DEC_CALC_G"].data, 5.0/3600.0, 1)

	ids = np.where( (inds <> len(t_fp)) )[0]
	t_cat = t_cat[ids]
	t_fp = t_fp[inds[ids]]
	t_out = hstack([t_fp, t_cat])
	t_out.write(outdir + "/" + tile + "_WISEfp_DEScat"+suffix_for_kernelcats+".fits", overwrite = True)
Code example #14
File: ebosscore.py  Project: imcgreer/simqso
def apply_selection_fun(fileName,verbose=0,redo=False,procMap=None,nsplit=1):
    xdFile = fileName.replace('.fits','__xdtmp.fits')
    qsos = Table.read(fileName)
    if 'PQSO' not in qsos.colnames or redo:
        if nsplit == 1:
            run_xdqso( (fileName,xdFile,verbose) )
            xdqso = Table.read(xdFile)
            os.remove(xdFile)
        else:
            splits = np.array_split(np.arange(len(qsos)),nsplit)
            procArgs = []
            for i,split in enumerate(splits):
                tmpFn = fileName.replace('.fits','__%02d.fits'%i)
                tmpXdFn = xdFile.replace('.fits','__%02d.fits'%i)
                qsos[split].write(tmpFn,overwrite=True)
                procArgs.append( (tmpFn,tmpXdFn,verbose) )
            procMap(run_xdqso,procArgs)
            xdqso = []
            for i,(tmpFn,tmpXdFn,_) in enumerate(procArgs):
                xdqso.append( Table.read(tmpXdFn) )
                os.remove(tmpFn)
                os.remove(tmpXdFn)
            xdqso = vstack(xdqso)
        if redo and 'PQSO' in qsos.colnames:
            qsos.remove_columns(xdqso.colnames)
        for col in xdqso.colnames:
            xdqso[col] = xdqso[col].astype(np.float32)
        qsos = hstack([qsos,xdqso])
    b = BandIndexes(qsos)
    # populate the fields needed for the mid-IR-optical color cuts
    qsos = get_sim_optwise_mags(qsos)
    #
    sel = True
    if verbose:
        print("{:7d} quasars at start".format(len(qsos)))
    # roughly the FIBER2MAG i>17 cut
    sel &= qsos['obsMag'][:,b('i')] > 17
    if verbose:
        print("{:7d} after fiberMag_i>17 cut".format(sel.sum()))
    # g < 22 OR r < 22
    sel &= ( (qsos['obsMag'][:,b('g')] < 22) | 
             (qsos['obsMag'][:,b('r')] < 22) )
    if verbose:
        print("{:7d} after g<22 OR r<22 cut".format(sel.sum()))
    # XDQSOz probability cut
    sel &= qsos['PQSO'] > 0.2
    if verbose:
        print("{:7d} after XDQSOz cut".format(sel.sum()))
    # optical--mid-IR color cut
    sel &= ( qsos['f_WISE']*qsos['obsFlux'][:,b('g')] >
              qsos['f_opt']*qsos['obsFlux'][:,b('i')]*10**(3.0/2.5) )
    if verbose:
        print("{:7d} after optical--mid-IR color cut".format(sel.sum()))
    #
    qsos['selected'] = sel
    qsos.write(fileName,overwrite=True)
Code example #15
File: spector.py  Project: aileisun/bubblepy
	def make_spec_mag(self, overwrite=False):
		"""
		make table spec_mag.csv that contains the convolved spectral magnitude and fnu in each band

		Params
		------
		self
		overwrite=False

		Return
		------
		status
		"""
		#==========================================================================

		fn = self.fp_spec_mag

		self.make_spec_decomposed_ecsv(overwrite=False)

		if not os.path.isfile(fn) or overwrite:
			print("[spector] making spec_mag")
			tabmag = at.Table()
			tabfnu = at.Table()

			for component in ['all', 'cont', 'line', 'contextrp']:
				for band in self.bands:
					colfnu = self.__get_specmag_colname(band, component=component, fluxquantity='fnu')
					colmag = self.__get_specmag_colname(band, component=component, fluxquantity='mag')
					try:
						fnu = self._calc_Fnu_in_band(band=band, component=component)
					except KeyboardInterrupt:
						sys.exit(0) 
					except:
						print(("[spector] skip calculating fnu of {} in band {}".format(component, band)))
					else: 
						mag = fnu.to(u.ABmag)
						fnu_nm = fnu.to(u.nanomaggy)				
						tabmag[colmag] = [mag.value]
						tabfnu[colfnu] = [fnu_nm.value]

			tab = at.hstack([tabmag, tabfnu])
			tab.meta['comments'] = [
									"survey_photo: {}".format(self.survey),
									"survey_spec: {}".format(self.survey_spec),
									"unit_mag: ABmag",
									"unit_fnu: nanomaggy",
									]

			tab.write(fn, comment='#', format='ascii.csv', overwrite=overwrite)
		else:
			print("[spector] skip making spec_mag as file exists")

		status = os.path.isfile(fn)
		return status 
Code example #16
File: libsampler.py  Project: jonathansick/sedbot
    def table(self):
        """A :class:`MultiPixelChain` representing the Gibbs chain.

        The intention is for the library sampler to have a data output
        similar to the MH-in-Gibbs sampler.
        """
        meta = OrderedDict((
            ('observed_bands', self.model.observed_bands),
            ('instruments', self.model.instruments),
            ('computed_bands', self.model.library_bands),
            ('msun_ab', self.model.msun_ab),
            ('band_indices', self.model.band_indices),
            ('theta_params', self.model.theta_params),
            ('meta_params', self.model.meta_params),
            ('sed', self.model._seds),
            ('sed_err', self.model._errs),
            ('pixels', self.model.pixel_metadata),
            ('area', self.model._areas),
            ('d', self.model.d)))

        # Make tables for theta chain, B chain, and blob chain (that includes
        # SED and M/L datasets)
        # TODO we should probably just change the chain dtypes instead
        # original table - (step, pixel, parameter)
        # output table - (step, parameter, pixel)
        theta_table = Table(np.swapaxes(self.theta_chain, 1, 2),
                            names=self.model.theta_params,
                            meta=meta)

        background_names = ["B__{0}__{1}".format(n, b)
                            for n, b in zip(self.model.instruments,
                                            self.model.observed_bands)]
        B_table = Table(self.B_chain, names=background_names)

        meta_table = Table(np.swapaxes(self.blob_chain, 1, 2),
                           names=self.model.meta_params)

        # model_sed column is of shape (step, pixel, band)
        # conforms to MultiPixelChain standard
        meta_table.add_column(Column(name='model_sed',
                                     data=self.sed_chain))

        # output table (step, band, pixel)
        # name columns to be different from flux
        ml_names = ["logML_{0}".format(b) for b in self.model.library_bands]
        ml_table = Table(np.swapaxes(self.ml_chain, 1, 2),
                         names=ml_names)

        tbl = MultiPixelChain(hstack((theta_table,
                                      B_table,
                                      meta_table,
                                      ml_table)))

        return tbl
Code example #17
File: tabtools.py  Project: aileisun/bubblepy
def summarize(fn_in, fn_out, columns=[], condi={}, overwrite=False):
	"""
	Summarize the table 'fn_in' and write the results to 'fn_out'. 
	For each of the columns in 'columns', take the mean, std, median, and the 16% and 84% quantiles. 
	All other columns that are not specified in 'columns' or 'condi' are ignored. 

	Params
	------
	fn_in
	fn_out
	columns=[] (list of string)
		list of column names, e.g., ['area_ars', 'dmax_ars']. Default: all columns. 
	condi={} 
		conditions, e.g. {'imgtag': 'OIII5008_I'}
	overwrite=False

	Return
	------
	status (bool)
	"""
	if not os.path.isfile(fn_out) or overwrite:
		tab_in = at.Table.read(fn_in)

		if len(condi)>0:
			tab_select = tab_extract_row(tab_in, condi=condi)
			tab_sum = tab_select[list(condi.keys())][0] # creating headers
		else: 
			tab_select = tab_in
			tab_sum = at.Table() # no headers

		if len(columns)==0:
			columns = tab_in.colnames

		# calculation
		for col in columns:
			if not col in list(condi.keys()):
				arr = tab_select[col]
				if arr.dtype in [float, int]:
					var_mean = np.mean(arr)
					var_std = np.std(arr)
					var_median = np.median(arr)
					var_p16 = np.percentile(arr, 16)
					var_p84 = np.percentile(arr, 84)

					tab_stat = at.Table([[var_mean], [var_std], [var_median], [var_p16], [var_p84], ], names=[col+tag for tag in ['_mean', '_std', '_median', '_p16', '_p84']])
					tab_sum = at.hstack([tab_sum, tab_stat])

		tab_sum.write(fn_out, overwrite=overwrite)

	else: 
		print("[tabtools] skip summarizing as files exist")

	return os.path.isfile(fn_out)
Code example #18
File: sampler.py  Project: jonathansick/sedbot
    def table(self):
        """An :class:`astropy.table.Table` with the chain."""
        msuns = np.array([fsps.get_filter(n).msun_ab
                          for n in self._model.computed_bands])
        theta_f_accept = json.dumps(dict(zip(self._model.theta_params,
                                             self.median_theta_faccept)))
        phi_f_accept = json.dumps(dict(zip(self._model.phi_params,
                                           self.phi_faccept)))
        meta = OrderedDict((
            ('theta_f_accept', theta_f_accept),
            ('phi_f_accept', phi_f_accept),
            ('observed_bands', self._model.observed_bands),
            ('instruments', self._model.instruments),
            ('computed_bands', self._model.computed_bands),
            ('msun_ab', msuns),
            ('band_indices', self._model.band_indices),
            ('theta_params', self._model.theta_params),
            ('phi_params', self._model.phi_params),
            ('theta_proposal_sigma', self._theta_prop),
            ('phi_proposal_sigma', self._theta_prop),
            ('sed', self._model._seds),
            ('sed_err', self._model._errs),
            ('pixels', self._model.pixel_metadata),
            ('area', self._model._areas)))
        # Make tables for individual chains; stack later
        # FIXME should axis order be changed for theta throughout the sampler?
        # or I can just continue to swap axes here
        theta_table = Table(np.swapaxes(self.theta, 1, 2),
                            names=self._model.theta_params,
                            meta=meta)
        phi_table = Table(self.phi, names=self._model.phi_params)
        background_names = ["B__{0}__{1}".format(n, b)
                            for n, b in zip(self._model.instruments,
                                            self._model.observed_bands)]
        B_table = Table(self.B, names=background_names)
        blob_table = Table(self.blobs)
        tbl = MultiPixelChain(hstack((theta_table,
                                      phi_table,
                                      B_table,
                                      blob_table)))

        # Add M/L computations for each computed band.
        for i, (band_name, msun) in enumerate(zip(self._model.computed_bands,
                                                  msuns)):
            logLsol = micro_jy_to_luminosity(tbl['model_sed'][:, :, i],
                                             msun,
                                             np.atleast_2d(tbl['d']).T)
            ml = tbl['logMstar'] - logLsol
            colname = "logML_{0}".format(band_name)
            tbl.add_column(Column(name=colname, data=ml))

        return tbl
Code example #19
    def _do_photometry(self, n_start=1):
        """
        Helper function which performs the iterations of the photometry process.

        Parameters
        ----------
        n_start : int
            Integer representing the start index of the iteration.
            It is 1 if positions are None, and 2 otherwise.

        Returns
        -------
        output_table : `~astropy.table.Table` or None
            Table with the photometry results, i.e., centroids and
            fluxes estimations and the initial estimates used to start
            the fitting process.
            None is returned if no sources are found in ``image``.
        """

        output_table = Table([[], [], [], [], [], [], [], [], []],
                names=('x_0', 'y_0', 'flux_0', 'id', 'group_id',
                       'iter_detected', 'x_fit', 'y_fit',
                       'flux_fit'),
                dtype=('f8', 'f8', 'f8', 'i4', 'i4', 'i4', 'f8', 'f8',
                       'f8'))
        sources = self.finder(self._residual_image)

        n = n_start
        while(len(sources) > 0 and
              (self.niters is None or n <= self.niters)):
            apertures = CircularAperture((sources['xcentroid'],
                                          sources['ycentroid']),
                                          r=self.aperture_radius)
            sources['aperture_flux'] = aperture_photometry(self._residual_image,
                    apertures)['aperture_sum']
            init_guess_tab = Table(names=['x_0', 'y_0', 'flux_0'],
                               data=[sources['xcentroid'],
                               sources['ycentroid'],
                               sources['aperture_flux']])

            star_groups = self.group_maker(init_guess_tab)
            table, self._residual_image = super(IterativelySubtractedPSFPhotometry,
                    self).nstar(self._residual_image, star_groups)
            table = hstack([init_guess_tab, table])

            table['iter_detected'] = n*np.ones(table['x_fit'].shape, dtype=int)

            output_table = vstack([output_table, table])
            sources = self.finder(self._residual_image)
            n += 1

        return output_table
Code example #20
File: results.py  Project: drex44/gammapy
    def to_table(self, **kwargs):
        """Return `~astropy.table.Table` containing all results.

        Only SpectrumStats and SpectrumFitResult are taken into account.
        """
        temp = [self.stats, self.fit]
        tables = [_.to_table(**kwargs) for _ in temp if _]
        return hstack(tables)
Code example #21
File: batch.py  Project: aileisun/bubblepy
	def _create_empty_list_table(self):
		"""
		Return an empty table with columns ra, dec, obj_name and those in args_to_list 
		"""
		d = np.dtype([('ra', 'float64'), ('dec', 'float64'), ('obj_name', 'S64')])
		tab0 = at.Table(dtype=d)

		if len(self.args_to_list) > 0:
			tab1 = at.Table(dtype=self.args_to_list_dtype)
			tab =  at.hstack([tab0, tab1])
		else:
			tab = tab0

		return tab
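hstack also works on zero-row tables, which is what makes this pattern viable; a quick check (the 'z' column stands in for args_to_list_dtype):

import numpy as np
import astropy.table as at

d = np.dtype([('ra', 'float64'), ('dec', 'float64'), ('obj_name', 'S64')])
tab0 = at.Table(dtype=d)
tab1 = at.Table(dtype=np.dtype([('z', 'float64')]))  # stand-in for args_to_list_dtype
tab = at.hstack([tab0, tab1])
print(len(tab), tab.colnames)  # 0 ['ra', 'dec', 'obj_name', 'z']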
Code example #22
File: examine_gas.py  Project: jd-au/magmo-HI
def compare_dickey_2003(magmo_coords, magmo_table):
    print("## Comparing with Dickey et al 2003 ##")

    dickey_table = ascii.read('../Dickey_2003_sources.txt')
    dickey_coords = SkyCoord(dickey_table['RA'], dickey_table['Dec'], frame='fk5', unit="deg")

    matches = find_best_matches(magmo_coords, dickey_coords, 2.16 * u.arcmin, magmo_table)
    t1 = dickey_table[matches[:, 0].astype(int)]
    t2 = magmo_table[matches[:, 1].astype(int)]
    combined = hstack([t1, t2], join_type='exact')
    dist_col = Column(name='Separation', data=matches[:, 2], unit=u.degree)
    combined.add_column(dist_col)
    combined.sort('Name_1')
    combined.write('dm-combined.vot', format='votable', overwrite=True)
Code example #23
File: cfhtw4.py  Project: imcgreer/QLFz4
def match_target_tab(paperdir=None,obsdir=None):
	zcat = read_s82_catalog()
	if obsdir is None:
		obsdir = os.path.join(os.environ['HOME'],'research','LSST',
		                      'Stripe82','2014October')
	cans = Table.read(os.path.join(obsdir,'cfhtw4_candidates_v3.fits'))
	cans = cans[ (cans['dec']<1.25) & (cans['mags'][:,3]>21.5) & 
	             (cans['mags'][:,3]<22.5) ]
	c1 = SkyCoord(cans['ra'],cans['dec'],unit=(u.deg,u.deg))
	c2 = SkyCoord(zcat['ra'],zcat['dec'],unit=(u.deg,u.deg))
	idx,d2d,d3c = match_coordinates_sky(c1,c2)
	ii = np.where(d2d.arcsec < 3.0)[0]
	assert np.all(ii == np.arange(len(cans)) )
	cfhtw4 = hstack([cans,zcat['M1450','z','obsdate','name'][idx[ii]]])
	cfhtw4.write('cfhtw4qsos.fits',overwrite=True)
Code example #24
File: chain.py  Project: jonathansick/sedbot
    def build_pixels_table(self, n_pixels=None):
        """Build the ``pixels`` table, which is built from the `'pixels'`
        metadata of individual chains.
        """
        with h5py.File(self._filepath, 'a') as f:
            if 'pixels' in f:
                del f['pixels']
        pixel_ids = self.pixel_ids
        if len(pixel_ids) == 0:
            return
        pixel_ids.sort()

        if n_pixels is None:
            # Hack to ensure pixel grid is at least big enough
            n_pixels = max(pixel_ids) + 1
        assert n_pixels > max(pixel_ids)

        # Get schema for the pixel metadata
        chain0 = self.read_chain(0)
        n_bands = len(chain0.meta['sed'])
        obs_bands = chain0.meta['observed_bands']
        instruments = chain0.meta['instruments']

        pixels_dtype = chain0.meta['pixels'].dtype
        pixel_data = np.empty(n_pixels, dtype=pixels_dtype)
        pixel_data.fill(np.nan)

        sed_data = np.empty(
            n_pixels,
            dtype=np.dtype([('sed', float, n_bands),
                            ('sed_err', float, n_bands)]))
        sed_data.fill(np.nan)

        for pixel_id in pixel_ids:
            chain = self.read_chain(pixel_id)
            for colname in pixels_dtype.names:
                pixel_data[colname][pixel_id] = chain.meta['pixels'][colname]
            sed_data['sed'][pixel_id] = chain.meta['sed']
            sed_data['sed_err'][pixel_id] = chain.meta['sed_err']
        meta = {"observed_bands": obs_bands,
                "instruments": instruments}
        pixel_table = Table(pixel_data, meta=meta)
        sed_table = Table(sed_data)
        pixel_table = hstack([pixel_table, sed_table], join_type='exact')
        pixel_table.write(self._filepath, path="pixels", format="hdf5",
                          append=True, overwrite=True)
Code example #25
File: db.py  Project: rbiswas4/OM10
    def paint(self,Nmax=None,verbose=False,lrg_input_cat='$OM10_DIR/data/LRGo.txt',qso_input_cat='$OM10_DIR/data/QSOo.txt'):
        ## read data from SDSS
        f=open(os.path.expandvars(lrg_input_cat),'r')
        lrg=loadtxt(f)
        f.close()
        #print lrg[0,0],lrg.shape
        g=open(os.path.expandvars(qso_input_cat),'r')
        qso=loadtxt(g)
        g.close()
        #print qso[0,0],qso.shape

        ###MY OWN REDSHIFT ONLY MATCHING HERE:

        lens_props = ['MAGG_LENS','MAGR_LENS','MAGI_LENS','MAGZ_LENS', \
        'MAGW1_LENS','MAGW2_LENS','MAGW3_LENS','MAGW4_LENS', 'SDSS_FLAG_LENS']

        src_props = ['MAGG_SRC','MAGR_SRC','MAGI_SRC','MAGZ_SRC', \
        'MAGW1_SRC','MAGW2_SRC','MAGW3_SRC','MAGW4_SRC', 'SDSS_FLAG_SRC']

        tmp_lens = Table(np.zeros((len(self.sample),len(lens_props)),dtype='f8'),names=lens_props)
        tmp_src = Table(np.zeros((len(self.sample),len(src_props)),dtype='f8'),names=src_props)

        if verbose: print('setup done')

        lrg_sort = lrg[np.argsort(lrg[:,0]),:]
        qso_sort = qso[np.argsort(qso[:,0]),:]
        lens_count = 0

        for lens in self.sample:

            #paint lens
            ind = np.searchsorted(lrg_sort[:,0],lens['ZLENS'])
            if ind >= len(lrg_sort): ind = len(lrg_sort) - 1
            tmp_lens[lens_count] = lrg_sort[ind,6:] - lrg_sort[ind,8] + lens['APMAG_I'] #assign colors, not mags
            #paint source
            qso_ind = np.searchsorted(qso_sort[:,0],lens['ZSRC'])
            if qso_ind >= len(qso_sort): qso_ind = len(qso_sort) - 1
            tmp_src[lens_count] = qso_sort[qso_ind,1:] - qso_sort[qso_ind,3] + lens['MAGI']

            lens_count += 1

        self.sample = hstack([self.sample,tmp_lens,tmp_src])

        return
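The redshift matching in paint relies on np.searchsorted against a catalog pre-sorted by redshift; a minimal illustration of that lookup (values made up):

import numpy as np

zcat = np.array([0.1, 0.5, 1.0, 2.0])  # sorted template redshifts
z = 0.7
ind = np.searchsorted(zcat, z)         # index of the first entry >= z
if ind >= len(zcat):
    ind = len(zcat) - 1                # clamp at the last template
print(ind, zcat[ind])                  # 2 1.0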
Code example #26
File: aperphot.py  Project: navtejsingh/pychimera
 def phot(self, image, objpos, aper):
     """
     Aperture photometry using Astropy's photutils.
     
     Parameters
     ----------
     image : numpy array
         2D image array
         
     objpos : list of tuple
         Object poistions as list of tuples
         
     aper : float
         Aperture radius in pixels
      
     Returns 
     -------
     phot_table : astropy table
          Output table with stellar photometry   
     """
     try:
         from astropy.table import hstack
         from photutils import aperture_photometry, CircularAnnulus, CircularAperture
     except ImportError:
         raise ImportError("astropy and photutils are required for aperture photometry")
 
     apertures = CircularAperture(objpos, r = aper) 
     annulus_apertures = CircularAnnulus(objpos, r_in = self.inner_radius, r_out = self.outer_radius)
     
     rawflux_table = aperture_photometry(image, apertures = apertures, method = self.method)
     bkgflux_table = aperture_photometry(image, apertures = annulus_apertures, method = self.method)
     phot_table = hstack([rawflux_table, bkgflux_table], table_names = ["raw", "bkg"])
     
     bkg = phot_table["aperture_sum_bkg"] / annulus_apertures.area()
     phot_table["msky"] = bkg
     phot_table["area"] = apertures.area()
     phot_table["nsky"] = annulus_apertures.area()
             
     bkg_sum = bkg * apertures.area()
     final_sum = phot_table["aperture_sum_raw"] - bkg_sum
     phot_table["flux"] = final_sum
     
     return phot_table
Code example #27
def get_main_catalog(args, index_table):
    """Makes main catalog containing information on all selected galaxies.
    Columns are identical to COSMOS Real Galaxy catalog"""
    print "Creating main catalog" 
    all_seg_ids = np.loadtxt(args.seg_list_file, delimiter=" ",dtype='S2')
    for f, filt in enumerate(args.filter_names):
        final_table = main_table()
        complete_table=Table()
        for seg_id in all_seg_ids:
            file_name = args.main_path + seg_id + '/' + filt + '_with_pstamp.fits'
            seg_cat = Table.read(file_name, format='fits')
            q, = np.where(index_table['SEG_ID'] == seg_id)
            indx_seg = index_table[q]
            temp = join(seg_cat, indx_seg, keys='NUMBER')
            col = Column(temp['HDU'], name='PSF_HDU')
            temp.add_column(col)
            temp.rename_column('MAG_AUTO', 'MAG')
            temp.rename_column('HDU', 'GAL_HDU')
            p_scales = np.ones(len(q))*0.03
            weights = np.ones(len(q))
            im = [args.gal_im_name.replace('filter', args.file_filter_name[f])]*len(q)
            im_names = [im[i].replace('umber',str(temp['FILE_NUM'][i])) for i in range(len(im))]
            psf = [args.psf_im_name.replace('filter', args.file_filter_name[f])]*len(q)
            psf_names = [psf[i].replace('umber',str(temp['FILE_NUM'][i])) for i in range(len(psf))]
            noise_names=[args.noise_file_name.replace('filter', args.file_filter_name[f])]*len(q)
            names = ('WEIGHT','GAL_FILENAME', 'PSF_FILENAME',
                     'PIXEL_SCALE', 'NOISE_FILENAME')
            dtype =('f8', 'S256', 'S288', 'f8', 'S208')
            tab = [weights, im_names, psf_names, p_scales, noise_names]
            temp2 = Table(tab, names=names, dtype=dtype)
            temp = hstack([temp,temp2])
            final_table = vstack([final_table,temp], join_type='inner')
            complete_table = vstack([complete_table,temp])
        path = args.main_path + args.out_dir 
        cat_name = args.cat_name.replace('filter', args.file_filter_name[f])
        index_table.sort('ORDER')
        ord_indx = [np.where(i_t==final_table['IDENT'])[0][0] for i_t in index_table['IDENT']]
        final_table[ord_indx].write(path + cat_name, format='fits',
                                                overwrite=True)
        print "Savings fits file at ", path + cat_name
        cat_name = 'complete_' + args.cat_name.replace('filter', args.file_filter_name[f])
        complete_table[ord_indx].write(args.main_path + cat_name, format='fits',
                                                   overwrite=True)
Code example #28
File: wcsimage.py  Project: philhodge/jwst
    def _recalc_catalog_xy(self, catalog):
        colnames = catalog.colnames

        if 'xref' not in colnames and 'yref' not in colnames:
            x, y = self._rd2xyref(catalog)
            txy = table.Table([x, y], names=('xref', 'yref'),
                              dtype=(np.float64, np.float64))
            catalog = table.hstack([catalog, txy], join_type='exact')

        elif 'xref' in colnames and 'yref' in colnames:
            x, y = self._rd2xyref(catalog)
            catalog['xref'] = x
            catalog['yref'] = y

        else:
            raise ValueError("Reference catalog must either have or not "
                             "have *both* 'x' and 'y' columns.")

        return catalog
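join_type='exact' is the safety net here: it makes hstack raise instead of padding with masked values when the tables disagree in length. A quick demonstration:

from astropy.table import Table, hstack

a = Table({'x': [1, 2, 3]})
b = Table({'y': [4, 5]})

print(hstack([a, b]))                 # default join_type='outer': pads b with masked values
try:
    hstack([a, b], join_type='exact')
except Exception as err:              # astropy raises TableMergeError here
    print(err)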
Code example #29
def ns_combine(ned_name, simbad_name, ns_combine, final_tab, match_tol = 1.0): # match_tol in arcsec

    ned_in = Table.read(ned_name)
    simbad_in = Table.read(simbad_name)
        
    # prelim processing
    ned_proc = reformat_cat(ned_in, old_name='Object Name', new_name='Name_N', old_type='Type', new_type='Type_N',
                            keepcols=['Object Name','RA(deg)','DEC(deg)','Type'])
    sim_proc = reformat_cat(simbad_in, old_name='MAIN_ID', new_name='Name_S', old_type='OTYPE', new_type='Type_S')

    # construct coordinates needed for matching
    ned_coo = SkyCoord(ra=ned_proc['RA(deg)'], dec=ned_proc['DEC(deg)'])
    sim_coo = SkyCoord(ra=sim_proc['RA_d'], dec=sim_proc['DEC_d']) 

    # do the matching
    matched_ned, matched_sim, ned_only, sim_only = symmetric_match_sky_coords(ned_coo, sim_coo, match_tol*u.arcsec)

    # generate the matched table
    # hstack is "horizontal stack", ie put the NED and SIMBAD columns for matched objects in the same row
    matchtab = hstack([ned_proc[matched_ned], sim_proc[matched_sim]], join_type = 'outer')
    
    # find the secure matches, save these as intermediate results
    matchtab2 = process_match(matchtab)
    matchtab2.write(ns_combine, format='fits')

    # process the secure match catalog
    keeplist = ['Name_N','RA(deg)','DEC(deg)','Type_N']
    matchtab3 = process_unmatch(Table(matchtab2[keeplist]), src='NS', rename_cols = ned_rename)

    #process the catalogs containing NED-only and SIMBAD-only objects
    nedcat = process_unmatch(ned_proc[ned_only], src = 'N', rename_cols= ned_rename)
    simcat = process_unmatch(sim_proc[sim_only], src = 'S', rename_cols = sim_rename)   
    
    # add the secure matches to the NED-only and SIMBAD-only catalogs
    # NB: I think this implies that the "insecure" matches just get thrown away - is that what we want?
    finaltab = vstack([matchtab3, nedcat, simcat],join_type = 'outer')
    
    # save the result
    finaltab.write(final_tab, format='fits')
            
    return
Code example #30
File: spector.py  Project: aileisun/bubblepy
	def make_lineflux(self, lines=['NeIII3870', 'NeIII3969', 'Hg', 'Hb', 'OIII4960', 'OIII5008', 'OI6302', 'OI6366'], u_flux=u.Unit("1E-17 erg cm-2 s-1"), overwrite=False):
		""" 
		make file spec_lineflux.csv that contains the flux of the specified lines. The fluxes are calculated by integrating the line component of the spectrum over a window of +/- 1400 km/s. 

		WARNING: currently only Hb and OIII lines are supported. For lines that overlap, e.g., Ha and NII, the current implementation will double count the flux. 

		Params
		------
		lines=['NeIII3870', 'NeIII3969', 'Hg', 'Hb', 'OIII4960', 'OIII5008', 'OI6302', 'OI6366']
		u_flux=u.Unit("1E-17 erg cm-2 s-1")
			the unit of the output
		overwrite=False

		Return
		------
		status (bool)
		"""
		fn = self.fp_spec_lineflux

		self.make_spec_decomposed_ecsv(overwrite=False)

		if not os.path.isfile(fn) or overwrite:
			print("[spector] making spec_lineflux")
			tab = at.Table()

			for line in lines:
				f, ferr = self._calc_line_flux(line=line, u_flux=u_flux, wunit=False)

				col_new = at.Table([[f], [ferr]], names=['f_{}'.format(line), 'ferr_{}'.format(line)])
				tab = at.hstack([tab, col_new])

			tab.meta['comments'] = ["unit_flux: {}".format(u_flux.to_string()),]

			tab.write(fn, comment='#', format='ascii.csv', overwrite=overwrite)

		else:
			print("[spector] skip making spec_lineflux as file exists")

		status = os.path.isfile(fn)
		return status 
Code example #31
def load_catalog(catalog,
                 doFermi=False,
                 doSimbad=False,
                 doField=False,
                 field=-1,
                 algorithm='ECE',
                 filt="g"):

    customSimbad = Simbad()
    customSimbad.add_votable_fields("otype(V)")
    customSimbad.add_votable_fields("otype(3)")
    customSimbad.add_votable_fields("otype(N)")
    customSimbad.add_votable_fields("otype(S)")

    if doFermi:
        filenames = sorted(glob.glob(os.path.join(catalog,"*/*.dat")))[::-1] + \
                    sorted(glob.glob(os.path.join(catalog,"*/*.h5")))[::-1]
    elif doField:
        filenames = sorted(glob.glob(os.path.join(catalog,"%d_*.dat" % field)))[::-1] + \
                    sorted(glob.glob(os.path.join(catalog,"%d_*.h5" % field)))[::-1] + \
                    sorted(glob.glob(os.path.join(catalog,"data_%04d_*.h5" % field)))[::-1]
    else:
        filenames = sorted(glob.glob(os.path.join(catalog,"*.dat")))[::-1] + \
                    sorted(glob.glob(os.path.join(catalog,"*.h5")))[::-1]

    h5names = [
        "objid", "ra", "dec", "stats0", "stats1", "stats2", "stats3", "stats4",
        "stats5", "stats6", "stats7", "stats8", "stats9", "stats10", "stats11",
        "stats12", "stats13", "stats14", "stats15", "stats16", "stats17",
        "stats18", "stats19", "stats20", "stats21"
    ]

    h5periodnames = [
        "objid", "period", "sig", "pdot", "periodicstats0", "periodicstats1",
        "periodicstats2", "periodicstats3", "periodicstats4", "periodicstats5",
        "periodicstats6", "periodicstats7", "periodicstats8", "periodicstats9",
        "periodicstats10", "periodicstats11", "periodicstats12",
        "periodicstats13"
    ]

    bands = {'g': 1, 'r': 2, 'i': 3}
    filter_id = bands[filt]
    cnt = 0
    data = []
    #filenames = filenames[:500]
    for ii, filename in enumerate(filenames):
        if np.mod(ii, 100) == 0:
            print(filename)
            print('Loading file %d/%d' % (ii, len(filenames)))

        filenameSplit = filename.split("/")
        catnum = filenameSplit[-1].replace(".dat",
                                           "").replace(".h5",
                                                       "").split("_")[-1]

        try:
            with h5py.File(filename, 'r') as f:
                name = f['names'][()]
                filters = f['filters'][()]
                stats = f['stats'][()]
                periodic_stats = f['stats_%s' % algorithm][()]
        except:
            continue

        data_tmp = Table(rows=stats, names=h5names)
        data_tmp['name'] = name
        data_tmp['filt'] = filters
        data_period_tmp = Table(rows=periodic_stats, names=h5periodnames)
        data_period_tmp.remove_column('objid')
        data_tmp = hstack([data_tmp, data_period_tmp], join_type='inner')
        if len(data_tmp) == 0: continue

        #data_tmp.add_index('name')
        if doField:
            idx = np.where(filter_id != data_tmp['filt'].astype(int))[0]
            data_tmp.remove_rows(idx)
            if len(data_tmp) == 0: continue

        data_tmp['name'] = data_tmp['name'].astype(str)
        data_tmp['filt'] = data_tmp['filt'].astype(str)
        data_tmp['catnum'] = int(catnum) * np.ones(data_tmp["ra"].shape)

        coord = SkyCoord(data_tmp["ra"], data_tmp["dec"], unit=u.degree)
        simbad = ["N/A"] * len(coord)
        if doSimbad:
            print('Querying simbad: %d/%d' % (ii, len(filenames)))
            doQuery = True
            result_table = None
            nquery = 1
            while doQuery and (not ii == 1078):
                try:
                    result_table = customSimbad.query_region(coord,
                                                             radius=2 *
                                                             u.arcsecond)
                    doQuery = False
                    nquery = nquery + 1
                except:
                    nquery = nquery + 1
                    time.sleep(10)
                    continue
                if nquery >= 3:
                    break

            if not result_table is None:
                ra = result_table['RA'].filled().tolist()
                dec = result_table['DEC'].filled().tolist()

                ra = Angle(ra, unit=u.hour)
                dec = Angle(dec, unit=u.deg)

                coords2 = SkyCoord(ra=ra, dec=dec, frame='icrs')
                idx, sep, _ = coords2.match_to_catalog_sky(coord)
                for jj, ii in enumerate(idx):
                    simbad[ii] = result_table[jj]["OTYPE_S"]
        data_tmp['simbad'] = simbad
        data_tmp['simbad'] = data_tmp['simbad'].astype(str)

        if cnt == 0:
            data = copy.copy(data_tmp)
        else:
            data = vstack([data, data_tmp])
        cnt = cnt + 1

    if len(data) == 0:
        print('No data in %s available...' % catalog)
        return []

    sig = data["sig"]
    idx = np.arange(len(sig)) / len(sig)
    sigsort = idx[np.argsort(sig)]
    data["sigsort"] = sigsort

    return data
Code example #32
def _attach_header_columns_to_tab(fn, theader):
	tab = at.Table.read(fn)
	tab = at.hstack([theader, tab])
	tab.write(fn, overwrite=True)
Code example #33
# Get the meta for tilt1, tilt2, phi12
meta_tilt1 = tuple(data.columns.items())[tilt1_id][1].meta
meta_tilt2 = tuple(data.columns.items())[tilt2_id][1].meta
meta_phi12 = tuple(data.columns.items())[phi12_id][1].meta
meta_f_ref = tuple(data.columns.items())[f_ref_id][1].meta

# Create an astropy table with the evolved spin samples
tilts_evol = Table([
    Column(tilt1_evol, name='tilt1' + label, meta=meta_tilt1),
    Column(tilt2_evol, name='tilt2' + label, meta=meta_tilt2),
    Column(phi12_evol, name='phi12' + label, meta=meta_phi12)
])

# Append the columns to the existing astropy table of samples from the HDF5 file
data_joined = hstack([data, tilts_evol])

if v_final != "ISCO":
    vfinal_col = Table([
        Column(v_final * np.ones_like(tilt1_evol),
               name='vfinal',
               meta=meta_f_ref)
    ])
    data_joined = hstack([data_joined, vfinal_col])

_remap_colnames(data_joined)

f = h5py.File(hdf_pos_file, 'r')
path = '/lalinference/' + run_identifier + '/posterior_samples'
level = f[path]
arrt = level.attrs
Code example #34
File: photometry.py  Project: aboucaud/photutils
    def nstar(self, image, star_groups):
        """
        Fit, as appropriate, a compound or single model to the given
        ``star_groups``. Groups are fitted sequentially from the
        smallest to the biggest. In each iteration, ``image`` is
        subtracted by the previous fitted group.

        Parameters
        ----------
        image : numpy.ndarray
            Background-subtracted image.
        star_groups : `~astropy.table.Table`
            This table must contain the following columns: ``id``,
            ``group_id``, ``x_0``, ``y_0``, ``flux_0``.  ``x_0`` and
            ``y_0`` are initial estimates of the centroids and
            ``flux_0`` is an initial estimate of the flux. Additionally,
            columns named as ``<param_name>_0`` are required if any other
            parameter in the psf model is free (i.e., the ``fixed``
            attribute of that parameter is ``False``).

        Returns
        -------
        result_tab : `~astropy.table.Table`
            Astropy table that contains photometry results.
        image : numpy.ndarray
            Residual image.
        """

        result_tab = Table()
        for param_tab_name in self._pars_to_output.keys():
            result_tab.add_column(Column(name=param_tab_name))

        unc_tab = Table()
        for param, isfixed in self.psf_model.fixed.items():
            if not isfixed:
                unc_tab.add_column(Column(name=param + "_unc"))

        y, x = np.indices(image.shape)

        star_groups = star_groups.group_by('group_id')
        for n in range(len(star_groups.groups)):
            group_psf = get_grouped_psf_model(self.psf_model,
                                              star_groups.groups[n],
                                              self._pars_to_set)
            usepixel = np.zeros_like(image, dtype=bool)

            for row in star_groups.groups[n]:
                usepixel[overlap_slices(large_array_shape=image.shape,
                                        small_array_shape=self.fitshape,
                                        position=(row['y_0'], row['x_0']),
                                        mode='trim')[0]] = True

            fit_model = self.fitter(group_psf, x[usepixel], y[usepixel],
                                    image[usepixel])
            param_table = self._model_params2table(fit_model,
                                                   len(star_groups.groups[n]))
            result_tab = vstack([result_tab, param_table])

            if 'param_cov' in self.fitter.fit_info.keys():
                unc_tab = vstack([
                    unc_tab,
                    self._get_uncertainties(len(star_groups.groups[n]))
                ])
            try:
                from astropy.nddata.utils import NoOverlapError
            except ImportError:
                raise ImportError("astropy 1.1 or greater is required in "
                                  "order to use this class.")
            # do not subtract if the fitting did not go well
            try:
                image = subtract_psf(image,
                                     self.psf_model,
                                     param_table,
                                     subshape=self.fitshape)
            except NoOverlapError:
                pass

        if 'param_cov' in self.fitter.fit_info.keys():
            result_tab = hstack([result_tab, unc_tab])

        return result_tab, image
Code example #35
t1.write('newtable.fits', overwrite=True)
# for a different format
t1.write('newtable.dat', format='ascii')
# there are TONS of formats available for both reading in and writing out,
# including latex tables, sextractor catalogs, machine readable tables
# that astronomy journals require, etc.
# see here:
#   https://docs.astropy.org/en/stable/io/unified.html#table-io

### stacking tables
# let's say you have two tables and you already know they line up exactly
# i.e., 0th row corresponds to 0th row, etc. (NOTE that this is not the case
# with t1 and t2)
# you can stack the tables horizontally:
from astropy.table import hstack
new = hstack([t1, t2])
# new has the same number of rows as t1 and t2, but now includes all columns
# from both tables together.
# you'll notice that columns with the same names have been renamed
# so field is now field_1 and field_2 because it appears in both tables

# if you have two tables with at least a few column names in common, you can
# vertically stack them:
from astropy.table import vstack
new2 = vstack([t1, t2])
# new2 has one copy of all unique columns from the two tables, and has
# the number of rows in t1 + rows in t2
# you'll see that where one of the tables didn't have a column in the other,
# the value is --. for example, t1 did not have a 'z' column, so in new2 there
# are a bunch of --'s associated with 'z' for the objects in t1.
# These represent data that is masked out or missing. Astropy Table is
# able to handle such missing entries automatically via masked columns.
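A tiny self-contained demo of both stacking modes (t1 and t2 here are stand-ins, not the tutorial's tables):

from astropy.table import Table, hstack, vstack

t1 = Table({'field': ['A', 'B'], 'mag': [20.1, 21.3]})
t2 = Table({'field': ['C', 'D'], 'z': [0.5, 1.2]})

h = hstack([t1, t2])   # same row count; duplicate names become field_1, field_2
v = vstack([t1, t2])   # rows appended; missing entries ('z' for t1, 'mag' for t2) are masked
print(h.colnames)      # ['field_1', 'mag', 'field_2', 'z']
print(v)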
Code example #36
File: cont_src.py  Project: Fil8/SHARPener
def find_src_imsad(cfg_par):
    """Finds the continuum sources according to the options set in the source_finder sub-keys

    The function stores the sources as a raw txt file straight from miriad, a formatted csv file and a karma annotation file.

    Parameters
    ----------
    cfg_par : str
        Parameter file loaded with sharpener
    
    Returns
    -------
    src_list : str
        csv table of continuum sources (output of MIRIAD imsad)
    """

    print('# Find continuum sources ')

    # Getting directories and convert files if necessary
    # ++++++++++++++++++++++++++++++++++++++++++++++++++
    os.chdir(cfg_par['general']['workdir'])
    sharpDir = 'sharpOut/'
    cont_im_mir = sharpDir + os.path.basename(
        cfg_par['general']['mircontname'])
    cont_im = cfg_par['general']['shortContname']
    # cannot use cfg_par, probably because file name would be too long for miriad
    src_imsad_out = cfg_par['general']['absdir'] + 'mir_src_sharp.txt'

    #src_imsad_out = '{0:s}mir_src_sharp.txt'.format(
    #   'sharpOut/abs/')

    key = 'source_finder'

    if not os.path.exists(cont_im_mir) and os.path.exists(cont_im):
        fits = lib.miriad('fits')
        fits.op = 'xyin'
        fits.in_ = cont_im
        fits.out = cont_im_mir
        fits.go(rmfiles=True)

    elif not os.path.exists(cont_im) and os.path.exists(cont_im_mir):

        fits = lib.miriad('fits')
        fits.op = 'xyout'
        fits.in_ = cont_im_mir
        fits.out = cont_im
        fits.go(rmfiles=True)

    # Run IMSAD in Miriad to get the source
    # ++++++++++++++++++++++++++++++++++++++
    imsad = lib.miriad('imsad')
    imsad.in_ = cont_im_mir
    imsad.out = src_imsad_out
    imsad.clip = cfg_par[key]['clip']
    #imsad.region = 'boxes\('+self.abs_ex_imsad_region+'\)'
    imsad.options = cfg_par[key]['options']

    imsad.go(rmfiles=True)

    # It seems that the length of a line in the Miriad
    # output file from imsad can vary depending on the flags.
    # It is necessary to check for this and adjust the length
    check_miriad_output(src_imsad_out)

    # Modify output of imsad to save it as csv
    # ++++++++++++++++++++++++++++++++++++++++
    # changed to use ascii.read functionality

    # read in data and rename columns
    src_list = ascii.read(src_imsad_out,
                          include_names=[
                              'col3', 'col4', 'col5', 'col6', 'col7', 'col8',
                              'col9', 'col10', 'col11', 'col12'
                          ])
    src_list.rename_column('col3', 'ra')
    src_list.rename_column('col4', 'dec')
    src_list.rename_column('col5', 'peak')
    src_list.rename_column('col6', 'flux_int')
    src_list.rename_column('col7', 'beam_major_decon')
    src_list.rename_column('col8', 'beam_minor_decon')
    src_list.rename_column('col9', 'beam_pang_decon')
    src_list.rename_column('col10', 'FLAG')
    src_list.rename_column('col11', 'DFLAG')
    src_list.rename_column('col12', 'FFLAG')
    n_src = np.size(src_list['ra'])

    # correct the miriad bug
    src_list['dec'] = np.array(
        [src.replace('+0+', '+') for src in src_list['dec']])

    # create two new columns
    # column 1: ID will be filled after sorting the columns
    src_id = np.zeros(n_src)

    # column 2: Source name
    j2000 = np.array([
        "{0:s}{1:s}".format(src_list['ra'][k].replace(':', ''),
                            src_list['dec'][k].replace(':', ''))
        for k in range(n_src)
    ])

    # create a table and merge it
    new_columns = Table([src_id, j2000], names=('ID', 'J2000'))
    src_list = hstack([new_columns, src_list])

    # sort the sources after source names
    src_list = src_list.group_by('J2000')

    # now assign ID
    src_list['ID'] = np.arange(n_src) + 1

    ### Find pixels of sources in continuum image
    #open continuum image

    hdulist = pyfits.open(cont_im)  # read input
    # read data and header
    #what follows works for wcs, but can be written better
    prihdr = hdulist[0].header
    w = wcs.WCS(prihdr)

    # RS: the rest of the function requires only 2 axis images
    if w.naxis == 4:
        w = w.dropaxis(3)
        w = w.dropaxis(2)
        img = hdulist[0].data[0][0]
    elif w.naxis == 3:
        w = w.dropaxis(2)
        img = hdulist[0].data[0]
    else:
        # already a 2-axis image; use the data as-is
        img = hdulist[0].data

    coord_list = coord.SkyCoord(src_list['ra'],
                                src_list['dec'],
                                unit=(u.hourangle, u.deg),
                                frame='fk5')

    px_ra = np.zeros(n_src, dtype=int)
    px_dec = np.zeros(n_src, dtype=int)
    for k in range(n_src):
        # round before casting: assigning the raw float into an int array
        # would truncate towards zero instead of rounding
        px, py = w.wcs_world2pix(coord_list[k].ra, coord_list[k].dec, 1)
        px_ra[k] = int(round(float(px)))
        px_dec[k] = int(round(float(py)))

    # create a table and merge it
    new_columns = Table([px_ra, px_dec], names=('pixel_ra', 'pixel_dec'))
    src_list = hstack([src_list, new_columns])

    src_list.write('{0:s}mir_src_sharp.csv'.format(
        cfg_par['general'].get('absdir')),
                   format='csv',
                   overwrite=True)

    # create a karma annotation file
    create_karma_annotation_file(coord_list, cfg_par)

    if cfg_par['abs_plot']['plot_contImage']:

        abs_plot.plot_continuum(cfg_par)


    hdulist.close()

    print('# Continuum sources found #')

    return src_list
Code example #37
0
def cross_matching(ref_catalogue,
                   pre_snr_tar_catalogue,
                   original_dist_tar_catalogue,
                   confidence_percentile,
                   single_candidate_confidence,
                   log,
                   options,
                   run,
                   already_cross_matched=None,
                   snr_restriction=False,
                   flux_match=True,
                   final_run=False):
    """
        Take the updated reference and target catalogues to perform a cross match within a resolution radius. Allows user to define a restriction on the SNR threshold of source, how tightly the fluxes should match and a normalisation factor for fluxes between the two catalogues.
        
        :param ref_catalogue: the reference catalogue with only un-cross-matched sources remaining in it
        :param pre_snr_tar_catalogue: the target catalogue with only un-cross-matched source remaining in it, which hasn't been filtered by SNR yet.
        :param original_dist_tar_catalogue: same sources as pre_snr_tar_catalogue but with original positions of all target sources
        :param confidence_percentile: float between 0 and 1 to indicate the required confidence interval for a match
        :param single_candidate_confidence: float between 0 and 1; the confidence required to accept a match when only a single candidate is found
        :param log: logger used for progress messages
        :param options: options object; options.plotting toggles diagnostic plots
        :param run: identifier for the current matching iteration (used when plotting)
        :param already_cross_matched: previously accepted matches, included when testing new matches for outliers (optional, default is None)

        :param snr_restriction: an integer or float that defines some lower limit on SNR. Only target sources above this SNR will be cross matched (optional, default is False)
        :param flux_match: If this condition is True matching probability will be altered by how close the sources match in flux (optional, default is True)
        :param final_run: argument required for comb_prob to override the set confidence percentile

        :return: tuple of (cross_matched_table, ref_catalogue, new_tar_catalogue, new_original_dist_tar_catalogue): the accepted matches plus the input catalogues with the matched sources removed
        """

    if final_run:
        log.info(
            "Returning most likely match to remaining {0} unmatched sources".
            format(len(pre_snr_tar_catalogue)))
    else:
        log.info("Initialising match of {0} target sources".format(
            len(pre_snr_tar_catalogue)))
    #applying snr filter if it is specified
    if snr_restriction != False:
        snr_filter = np.where(
            (pre_snr_tar_catalogue['peak_flux'] /
             pre_snr_tar_catalogue['local_rms']) >= snr_restriction)
        tar_catalogue = pre_snr_tar_catalogue[snr_filter]
    else:
        tar_catalogue = pre_snr_tar_catalogue

    #we're going to cast a wide net of 10' to find multiple matches per target source
    limiting_res = 600.0 / 3600.0 * u.degree

    #Using SkyCoord package to prepare the ref and tar catalogues for the cross match
    ref_cat = SkyCoord(ref_catalogue['ra'],
                       ref_catalogue['dec'],
                       unit=u.degree,
                       frame='icrs')
    tar_cat = SkyCoord(tar_catalogue['ra'],
                       tar_catalogue['dec'],
                       unit=u.degree,
                       frame='icrs')

    ref_cat_uuid = []
    tar_cat_uuid = []

    gross_matched_idx_ref, gross_matched_idx_tar, gross_matched_sep, dum = tar_cat.search_around_sky(
        ref_cat, limiting_res)
    if snr_restriction != False:
        tar_cat_matched_within_resolution = original_dist_tar_catalogue[
            snr_filter][gross_matched_idx_tar]
    else:
        tar_cat_matched_within_resolution = original_dist_tar_catalogue[
            gross_matched_idx_tar]

    #this is because the probability determination uses the adjusted positions
    tar_cat_for_prob = tar_catalogue[gross_matched_idx_tar]

    ref_cat_matched_within_resolution = ref_catalogue[gross_matched_idx_ref]

    for item in tar_cat_for_prob:
        if not np.any(np.isin(tar_cat_uuid, item['uuid'])):
            reference_matches = ref_cat_matched_within_resolution[np.where(
                tar_cat_for_prob['uuid'] == item['uuid'])]
            match = prob_comb(reference_matches, item, confidence_percentile,
                              single_candidate_confidence, flux_match,
                              final_run)
            if match != False:
                tar_uuid = match[0]
                ref_uuid = match[1]
                target_entry_idx = np.where(
                    original_dist_tar_catalogue['uuid'] == tar_uuid)
                target_entry = original_dist_tar_catalogue[target_entry_idx]
                reference_entry_idx = np.where(
                    ref_catalogue['uuid'] == ref_uuid)
                entry = hstack([
                    original_dist_tar_catalogue[target_entry_idx],
                    ref_catalogue[reference_entry_idx]
                ],
                               table_names=['tar', 'ref'],
                               uniq_col_name='{table_name}_{col_name}')
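                # with uniq_col_name='{table_name}_{col_name}', columns present
                # in both inputs come out as 'tar_<col>' / 'ref_<col>'
                # (e.g. the 'tar_uuid' column used in the rejection step below)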
                entry.add_columns([
                    Column([match[2]], name='pos_prob'),
                    Column([match[3]], name='flux_prob'),
                    Column([match[4]], name='total_prob'),
                    Column([match[5]], name='norm_prob'),
                    Column([match[6]], name='num_of_candidates')
                ])
                try:
                    cross_matched_table = vstack([cross_matched_table, entry])
                except (NameError, TypeError):
                    cross_matched_table = entry
                ref_cat_uuid.append(ref_uuid)
                tar_cat_uuid.append(tar_uuid)

    random.shuffle(tar_cat_uuid)

    rejected_match_idx = []
    if len(cross_matched_table) > 0 and not final_run:
        for i in range(0, len(tar_cat_uuid)):
            try:
                passed_catalogue = vstack(
                    [already_cross_matched, cross_matched_table])
            except TypeError:
                passed_catalogue = cross_matched_table
            if reject_outliers(passed_catalogue, tar_cat_uuid[i]) == 'reject':
                rejected_match_idx.append(i)
                rejected_entry_idx = np.where(
                    cross_matched_table['tar_uuid'] == tar_cat_uuid[i])[0][0]
                rejected_entry = copy(cross_matched_table[rejected_entry_idx])

                try:
                    rejected_catalogue.add_row(rejected_entry)
                except (NameError, TypeError):
                    rejected_catalogue = Table(rejected_entry)
                cross_matched_table.remove_row(
                    np.where(cross_matched_table['tar_uuid'] ==
                             tar_cat_uuid[i])[0][0])
    if not final_run:
        if options.plotting:
            try:
                plot_rejections(
                    vstack([already_cross_matched, cross_matched_table]),
                    rejected_catalogue, run, options)
            except TypeError:
                plot_rejections(cross_matched_table, rejected_catalogue, run,
                                options)

    tar_cat_uuid = np.array(tar_cat_uuid)
    tar_cat_uuid = tar_cat_uuid[np.delete(np.arange(0, len(tar_cat_uuid)),
                                          rejected_match_idx)]

    #remove the correctly cross matched sources from the modelled position target catalogue and original position target catalogue
    new_tar_index_list = np.delete(
        np.arange(0, len(pre_snr_tar_catalogue)),
        np.arange(0, len(pre_snr_tar_catalogue))[np.isin(
            pre_snr_tar_catalogue['uuid'], tar_cat_uuid)])
    new_tar_catalogue = pre_snr_tar_catalogue[new_tar_index_list]
    new_original_dist_tar_catalogue = original_dist_tar_catalogue[
        new_tar_index_list]

    return cross_matched_table, ref_catalogue, new_tar_catalogue, new_original_dist_tar_catalogue
Code example #38
0
    def process(self):
        self.info('Processing has been started')
        for image in self.images_list:
            self.info('Processing image: {}'.format(image.name))
            apertures, sigma_values_table = self._create_apertures(
                image, self.stars_coordinates[str(image.savart)], image.shape)

            out_table = []
            counts_tab = []
            counts_error_tab = []

            for aperture, sigma_value in zip(apertures, sigma_values_table):
                rawflux_table = aperture_photometry(image.data, aperture[0])
                bkgflux_table = aperture_photometry(image.data, aperture[1])
                phot_table = hstack([rawflux_table, bkgflux_table],
                                    table_names=['raw', 'bkg'])

                if self.config_section.get('bkg_annulus'):
                    self.info(
                        'Mean background value from annulus has been used.')
                    bkg_mean = phot_table['aperture_sum_bkg'] / aperture[
                        1].area()
                    bkg_sum = bkg_mean * aperture[0].area()
                    self.info('Mean background value\n {}'.format(bkg_mean))
                else:
                    self.info('Mean background value from mask '
                              'sigma clipped stats has been used.')
                    bkg_mean = sigma_value.median
                    bkg_sum = bkg_mean * aperture[0].area()
                    self.info('Mean background value\n {}'.format(bkg_mean))

                final_sum = phot_table['aperture_sum_raw'] - bkg_sum
                phot_table['residual_aperture_sum'] = final_sum

                final_sum_error = self._calc_phot_error(
                    image.hdr, aperture, phot_table, bkgflux_table,
                    sigma_value.std)

                phot_table.add_column(
                    Column(name='residual_aperture_err_sum',
                           data=[final_sum_error]))

                phot_table['xcenter_raw'].shape = 1
                phot_table['ycenter_raw'].shape = 1
                phot_table['xcenter_bkg'].shape = 1
                phot_table['ycenter_bkg'].shape = 1

                out_table.append(phot_table)
                counts_tab.append(final_sum)
                counts_error_tab.append(final_sum_error)

            out_table = vstack(out_table)

            if image.savart not in self.measurements:
                self.measurements[image.savart] = [
                    SavartCounts(image.savart, image.jd, counts_tab,
                                 counts_error_tab)
                ]
            else:
                self.measurements[image.savart].append(
                    SavartCounts(image.savart, image.jd, counts_tab,
                                 counts_error_tab))

            if self.config_section.get('plot_images'):
                self._make_image_plot(image.data, apertures, image.name)

            self._save_image_output(out_table, image.name + '.csv')

        self.measurements = OrderedDict(sorted(self.measurements.items()))
        self._save_polarizaton_results()
Code example #39
0
orig_to_obs = dict(zip(('inds','sep','_'), c_obs.match_to_catalog_sky(c_inp)))
obs_to_orig = dict(zip(('inds','sep','_'), c_inp.match_to_catalog_sky(c_obs)))

print("Observations match {0} out of {1} of the original leaves to 1 arcsec"
      .format((obs_to_orig['sep'] < 1*u.arcsec).sum(), len(pruned_orig_ppcat)))
print("Of the observations, {0} out of {1} have <1 arcsec matches to original sources"
      .format((obs_to_orig['sep'] < 1*u.arcsec).sum(), len(pruned_ppcat)))
print("Input has {0} out of {1} matches to the observed leaves within 1 arcsec"
      .format((orig_to_obs['sep'] < 1*u.arcsec).sum(), len(pruned_orig_ppcat)))

pruned_ppcat.add_column(Column(data=orig_to_obs['inds'],
                               name='match_inds'))
pruned_ppcat.add_column(Column(data=orig_to_obs['sep'],
                               name='match_separation'))
temporary = pruned_orig_ppcat[pruned_ppcat['match_inds']]
merged_orig_onto_obs = table.hstack([pruned_ppcat, temporary])
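# temporary holds the original-catalogue rows reordered so that row i lines up
# with row i of pruned_ppcat; hstack then places each observed leaf next to its
# nearest original match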

# ds9 -multiframe ../analysis/*perseus*.fits ../perseus_synth/perseus_250_2_model_tclean_clean_noise.fits -lock frames image -frame 2 -scale minmax -cmap sls -frame 3 -frame delete -frame 7 -frame delete -frame 6 -cmap sls -frame 8 -cmap sls -frame 4 -cmap sls -frame 5 -cmap value 8.5 0.05 -frame 9 -cmap value 12 0.03 -frame 1 -cmap value 8.5 0.05 -lock crosshairs image  &
import pylab as pl
from astropy.visualization import (MinMaxInterval, ManualInterval, AsinhStretch,
                                   ImageNormalize)

norm = ImageNormalize(data_original, interval=ManualInterval(-0.002,0.03),
                      stretch=AsinhStretch())

fig = pl.figure(1)
fig.clf()
ax = fig.add_axes([0.15, 0.1, 0.8, 0.8], projection=mywcs)
ax.imshow(data_original, cmap='gray_r', origin='lower', interpolation='none', norm=norm)
ax.plot(orig_ppcat['x_cen'], orig_ppcat['y_cen'], 'o', markeredgecolor='r', markerfacecolor='none', transform=ax.get_transform('world'))
Code example #40
0
def find_close_objs(lo, lbcs, tolerance=5.):

    ## first filter the LBCS data on Flags
    lbcs_idx = np.where( np.logical_or(lbcs['Flags'] == 'O', lbcs['Flags'] == 'A' ) )
    lbcs = lbcs[lbcs_idx]
    ## get rid of anything with only X's
    nump = []
    nums = []
    for xx in range(len(lbcs)):
        nump.append(count_p(lbcs['Goodness'][xx]))
        nums.append(count_s(lbcs['Goodness'][xx]))
    print( np.array(nump) + np.array(nums) )
    lbcs_idx = np.where( np.array(nump)+np.array(nums) > 0 )[0]
    lbcs = lbcs[lbcs_idx]

    ## get RA and DEC for the catalogues
    lotss_coords = SkyCoord( lo['RA'], lo['DEC'], frame='icrs', unit='deg' )
    lbcs_coords = SkyCoord( lbcs['RA'], lbcs['DEC'], frame='icrs', unit='deg' )

    ## search radius 
    search_rad = 5. / 60. / 60. * u.deg

    ## loop through the lbcs coordinates -- this will be much faster than looping through lotss
    lotss_idx = []
    lbcs_idx = []    
    for xx in range(len(lbcs)):
        seps = lbcs_coords[xx].separation(lotss_coords)
        match_idx = np.where( seps < search_rad )[0]
        if len( match_idx ) == 0:
            ## there's no match, move on to the next source
            m_idx = [-1]
            pass
        else:
            if len( match_idx ) == 1:
                ## there's only one match
                m_idx = match_idx[0]
                lbcs_idx.append(xx)
                lotss_idx.append(m_idx)
            if len( match_idx ) > 1:
                ## there's more than one match, pick the brightest
                tmp = lo[match_idx]
                m_idx = np.where( tmp['Total_flux'] == np.max( tmp['Total_flux'] ) )[0]
                if not isinstance(m_idx, int):
                    m_idx = m_idx[0]
                lbcs_idx.append(xx)
                lotss_idx.append(m_idx)
    lbcs_matches = lbcs[lbcs_idx]
    lotss_matches = lo[lotss_idx]

    combined = hstack( [lbcs_matches, lotss_matches], join_type='exact' )
    ## check if there are duplicate lbcs observations for a lotss source
    if len( np.unique( combined['Source_id'] ) ) != len( combined ):
        # there are duplicates
        print( 'There are duplicate LBCS sources, selecting the best candidate(s).' )
        src_ids = np.unique( combined['Source_id'] )
        good_idx = []
        for src_id in src_ids:
            idx = np.where( combined['Source_id'] == src_id )[0]
            if len(idx) > 1:
                ## multiple matches found.  Count P's first and then break ties with Goodness_FT
                num_P = []
                total_ft = []
                for yy in range( len( idx ) ):
                    tmp = combined[idx[yy]]['Goodness']
                    num_P.append( count_p( tmp ) )

                    tmp = combined[idx[yy]]['FT_Goodness']
                    total_ft.append( sum_digits( tmp ) )
                ## check that the total_ft values are non-zero before looking for a best cal
                if np.max( total_ft ) > 0:
                    ## pick the one with the highest number of P's -- if tie, use total_ft
                    best_idx = np.where( num_P == np.max( num_P ) )[0]  ## this is an array
                    if len( best_idx ) == 1:
                        good_idx.append(idx[best_idx][0])  ## idx[best_idx][0] is a number
                    if len( best_idx ) > 1:
                        currentmax = 0.0
                        for i in range(0, len(best_idx)):
                            if total_ft[best_idx[i]] > currentmax:
                                currentmax = total_ft[best_idx[i]]
                                ft_idx = i
                        good_idx.append( idx[best_idx[ft_idx]] )
                else:
                    print( 'Duplicate sources have total_ft = 0, removing from results.' )
            else:
                good_idx.append(idx[0])

        result = combined[good_idx]
    else:
        print( 'No duplicate sources found' )
        result = combined
    ## rename RA columns
    result.rename_column('RA_1','RA_LBCS')
    result.rename_column('DEC_1','DEC_LBCS')
    result.rename_column('RA_2','RA')
    result.rename_column('DEC_2','DEC')

    return result
Code example #41
0
File: obs.py Project: abhijeetmanhas/sbpy
    def supplement(self, service='jplhorizons', id_field='targetname',
                   epoch_field='epoch', location='500',
                   modify_fieldnames='obs', **kwargs):
        """Supplement observational data with ephemerides
        queried from the selected service.

        Parameters
        ----------
        service : str, optional
            Service from which to acquire data: ``'jplhorizons'``,
            ``'mpc'``, or ``'miriade'``, corresponding to the
            `JPL Horizons system <https://ssd.jpl.nasa.gov/horizons.cgi>`_
            (using `~sbpy.data.Ephem.from_horizons`),
            the `Minor Planet Center ephemeris service
            <https://minorplanetcenter.net/iau/MPEph/MPEph.html>`_
            (using `~sbpy.data.Ephem.from_mpc`), and
            the `IMCCE Miriade service
            <http://vo.imcce.fr/webservices/miriade/>`_
            (using `~sbpy.data.Ephem.from_miriade`). Default:
            ``'jplhorizons'``
        id_field : str, optional
            Field name that corresponds to a suitable target identifier in
            this `~sbpy.data.Obs` object. Default: ``'targetname'``
        epoch_field : str, optional
            Field name that corresponds to a suitable epoch identifier in
            this `~sbpy.data.Obs` object. The corresponding column must be
            of type `~astropy.time.Time`. Default: ``'epoch'``
        location : str, optional
            Location of the observer for the data stored in this
            `~sbpy.data.Obs` object. Default: ``'500'`` (geocenter)
        modify_fieldnames : str, optional
            Defines whether field names in this `~sbpy.data.Obs` object
            (``'obs'``) or in the supplemental data to be queried (``'eph'``)
            will be modified by adding a suffix in case of field name
            collisions. Default: ``'obs'``
        **kwargs : optional
            Additional keyword arguments are passed to the corresponding
            ephemerides query service.

        Returns
        -------
        `~Obs` object
            The resulting object will contain all data from this
            `~sbpy.data.Obs` object as well as the queried ephemeris data.

        Notes
        -----
        * Not all available services are equally suited for this kind
          of query: only the JPL Horizons system enables quick queries
          for a large number of epochs. Queries using the other
          services may take a long time depending on the number of
          epochs and targets.


        Examples
        --------
        >>> from sbpy.data import Obs
        >>> obs = Obs.from_mpc('2019 AA', id_type='asteroid designation') # doctest: +SKIP
        >>> data = obs.supplement(id_field='designation') # doctest: +SKIP
        >>> data.field_names # doctest: +SKIP
        <TableColumns names=('number','desig','discovery','note1','note2','epoch','RA_obs','DEC_obs','mag','band','observatory','target','RA','DEC','delta','V','alpha','elong','RAcosD_rate','DEC_rate','delta_rate')>
        """

        try:
            targetids = set(self[id_field])
        except (TypeError, KeyError):
            raise QueryError('cannot use field {} as id_field.'.format(
                id_field))

        all_obs = None
        all_eph = None
        for targetid in targetids:

            if all_obs is None:
                all_obs = self.table[self[id_field] == targetid]
            else:
                all_obs = vstack([all_obs,
                                  self.table[self[id_field] == targetid]])

            if service == 'jplhorizons':
                eph = Ephem.from_horizons(
                    targetid,
                    epochs=self[self[id_field] == targetid][epoch_field],
                    location=location,
                    **kwargs)
                eph.table.remove_column('epoch')
            elif service == 'mpc':
                eph = Ephem.from_mpc(
                    targetid,
                    epochs=self[self[id_field] == targetid][epoch_field],
                    location=location,
                    **kwargs)
                eph.table.remove_column('Date')
            elif service == 'miriade':
                eph = Ephem.from_miriade(
                    targetid,
                    epochs=self[self[id_field] == targetid][epoch_field],
                    location=location,
                    **kwargs)
                eph.table.remove_column('epoch')
            else:
                raise QueryError('service {} not known.'.format(service))

            if all_eph is None:
                all_eph = eph.table
            else:
                all_eph = vstack([all_eph, eph.table])

        # identify field names that both obs and eph have in common
        fieldnames_intersect = set(all_eph.columns).intersection(
            all_obs.columns)
        for fieldname in fieldnames_intersect:
            if modify_fieldnames == 'obs':
                all_obs.rename_column(fieldname, fieldname+'_obs')
            elif modify_fieldnames == 'eph':
                all_eph.rename_column(fieldname, fieldname+'_eph')

        return Obs.from_table(hstack([all_obs, all_eph]),
                              meta=self.meta)
Code example #42
0
    def _consolidateFitsData(cls, dcrHdu, ifHdu):
        """Given DCR and IF HDU objects, pull out the information needed
        to perform calibration into a single Astropy Table, then return it"""

        # STATE describes the phases in use
        dcrStateTable = cls.getTableByName(dcrHdu, 'STATE')
        # DATA contains the actual data recorded by the DCR
        dcrDataTable = cls.getTableByName(dcrHdu, 'DATA')

        # How many unique CAL states are there?
        calStates = numpy.unique(dcrStateTable['CAL'])
        # There should be only 1 or 2 possible states
        if list(calStates) not in [[0], [1], [0, 1]]:
            raise ValueError(
                "Invalid CAL states detected in DCR.RECEIVER.CAL: {}".format(
                    calStates))

        # How many unique SIGREF states are there?
        sigRefStates = numpy.unique(dcrStateTable['SIGREF'])
        # There should be only 1 or 2 possible states
        if list(sigRefStates) not in [[0], [1], [0, 1]]:
            raise ValueError("Invalid SIGREF states detected in "
                             "DCR.RECEIVER.SIGREF: {}".format(sigRefStates))

        # DCR data from IF table
        ifDcrDataTable = cls.getIfDataByBackend(ifHdu)

        if len(numpy.unique(ifDcrDataTable['RECEIVER'])) != 1:
            raise ValueError("There must only be one RECEIVER per scan!")

        ifDcrDataTable.meta['RECEIVER'] = ifDcrDataTable['RECEIVER'][0]
        # Strip out unneeded/misleading columns
        filteredIfTable = ifDcrDataTable['FEED', 'RECEPTOR', 'POLARIZE',
                                         'CENTER_SKY', 'BANDWDTH', 'PORT',
                                         'HIGH_CAL']

        # Each of these rows actually has a maximum of four possible states:
        # | `SIGREF` | `CAL` |      Phase key       | Phase index |
        # |----------|-------|----------------------|-------------|
        # |        0 |     0 | `Signal / No Cal`    |           0 |
        # |        0 |     1 | `Signal / Cal`       |           1 |
        # |        1 |     0 | `Reference / No Cal` |           2 |
        # |        1 |     1 | `Reference / Cal`    |           3 |

        # So, let's get the number of states for this specific dataset
        # by querying the DCR STATE table. Note that this is a scalar value
        # that indicates how many phases the data from each port has been
        # taken during
        numPhasesPerPort = len(numpy.unique(dcrStateTable['SIGREF', 'CAL']))
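        # e.g. if both SIGREF and CAL toggle during the scan, the unique
        # (SIGREF, CAL) pairs are (0,0), (0,1), (1,0), (1,1), so
        # numPhasesPerPort == 4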

        # Then we can stack our IF table n times, where n is numPhasesPerPort
        filteredIfTable = vstack([filteredIfTable] * numPhasesPerPort)

        filteredIfTable.sort('PORT')

        # We now have a table that is the correct final size.
        # But, it does not yet have the SIGREF and CAL columns

        # Before we add those, we need to make them the right length.
        # We do that by stacking a slice of the state table containing only
        # those two columns n times, where n is the number of rows in the IF
        # DCR table.
        try:
            expandedStateTable = vstack([dcrStateTable['SIGREF', 'CAL']] *
                                        len(ifDcrDataTable))
        except TypeError:
            logger.error(
                "Could not stack DCR table. Is length of ifDcrDataTable 0? {}".
                format(len(ifDcrDataTable)))
            logger.error(ifDcrDataTable)
            raise

        # Delete this meta key; we don't need it and it
        # results in a warning
        del expandedStateTable.meta['EXTNAME']
        # We now have two tables, both the same length, and they can be simply
        # stacked together horizontally.
        filteredIfTable = hstack([filteredIfTable, expandedStateTable])

        # We now have a table that maps physical attributes to the different
        # states in which data was taken. That is, for each feed we have rows
        # that map it to the various SIGREF and CAL states that were active at
        # some point during the scan.
        # So, we now need to map these physical attributes to the actual data!

        # Get the sets of unique SIGREF and CAL states. Note that this could
        # _probably_ be done by simply grabbing the whole column from
        # dcrStateTable, but this way we guarantee uniqueness.
        uniquePorts = numpy.unique(filteredIfTable['PORT'])
        uniqueSigRefStates = numpy.unique(filteredIfTable['SIGREF'])
        uniqueCalStates = numpy.unique(filteredIfTable['CAL'])

        phaseStateTable = dcrStateTable['SIGREF', 'CAL']
        phaseStateTable.add_column(
            Column(name='PHASE', data=numpy.arange(len(phaseStateTable))))

        filteredIfTable.add_column(
            Column(name='DATA',
                   dtype=dcrDataTable['DATA'].dtype,
                   shape=dcrDataTable['DATA'].shape[0],
                   length=len(filteredIfTable)))

        # This is a reasonable assert to make, but it will fail when the IF FITS
        # only has a *subset* of the ports used by the DCR.  Sparrow ignores ports
        # NOT specified by the IF FITS file, so we'll do the same
        #assert len(uniquePorts) == dcrDataTable['DATA'].shape[1]
        if len(uniquePorts) != dcrDataTable['DATA'].shape[1]:
            logger.warning("IF ports are only a subset of DCR ports used")
        # TODO: I wonder if there is a way to avoid looping here altogether?
        for portIndex, port in enumerate([port + 1 for port in uniquePorts]):
            # TODO: Combine these into one?
            for sigRefStateIndex, sigRefState in enumerate(uniqueSigRefStates):
                for calStateIndex, calState in enumerate(uniqueCalStates):
                    phaseMask = ((phaseStateTable['SIGREF'] == sigRefState) &
                                 (phaseStateTable['CAL'] == calState))
                    # Assert that the mask doesn't match more than one row
                    if numpy.count_nonzero(phaseMask) != 1:
                        raise ValueError("PHASE could not be unambiguously "
                                         "determined from given SIGREF ({}) "
                                         "and CAL ({})".format(
                                             sigRefState, calState))
                    phase = phaseStateTable[phaseMask]['PHASE'][0]
                    # Calculate the index of our 1D column from where we are
                    # in our 3D data
                    dataColumnIndex = (
                        (portIndex *
                         (len(uniqueSigRefStates) * len(uniqueCalStates))) +
                        (sigRefStateIndex * len(uniqueCalStates)) +
                        calStateIndex)
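                    # worked example: with 2 SIGREF x 2 CAL states, the second
                    # port (portIndex=1) at sigRefStateIndex=1, calStateIndex=0
                    # maps to 1*(2*2) + 1*2 + 0 = 6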
                    # Slice out our data. We want data from every row of DATA (::),
                    # but we only want the data in the nth dimension of the
                    # mth dimension, where m = portIndex and n = phase
                    dataRow = dcrDataTable['DATA'][::, portIndex, phase]
                    filteredIfTable['DATA'][dataColumnIndex] = dataRow

        projPath = os.path.dirname(os.path.dirname(dcrHdu.filename()))
        filteredIfTable.meta['PROJPATH'] = os.path.realpath(projPath)

        return filteredIfTable
Code example #43
0
File: photometry.py Project: LejayChen/photutils
    def do_photometry(self, image, positions=None):
        """
        Perform PSF photometry in ``image``.

        This method assumes that ``psf_model`` has centroids and flux
        parameters which will be fitted to the data provided in
        ``image``. A compound model, in fact a sum of ``psf_model``,
        will be fitted to groups of stars automatically identified by
        ``group_maker``. Also, ``image`` is not assumed to be background
        subtracted.  If positions are not ``None`` then this method
        performs forced PSF photometry, i.e., the positions are assumed
        to be known with high accuracy and only fluxes are fitted. If
        the centroid positions are set as ``fixed`` in the PSF model
        ``psf_model``, then the optimizer will only consider the flux as
        a variable. Otherwise, ``positions`` will be used as initial
        guesses for the centroids.

        Parameters
        ----------
        image : 2D array-like, `~astropy.io.fits.ImageHDU`, `~astropy.io.fits.HDUList`
            Image to perform photometry.
        positions: `~astropy.table.Table`
            Positions (in pixel coordinates) at which to *start* the fit
            for each object. Columns 'x_0' and 'y_0' must be present.
            'flux_0' can also be provided to set initial fluxes.

        Returns
        -------
        outtab : `~astropy.table.Table`
            Table with the photometry results, i.e., centroids and
            fluxes estimations and the initial estimates used to start
            the fitting process.
        residual_image : array-like, `~astropy.io.fits.ImageHDU`, `~astropy.io.fits.HDUList`
            Residual image calculated by subtracting the fitted sources
            and the original image.
        """

        if self.bkg_estimator is None:
            residual_image = image.copy()
        else:
            residual_image = image - self.bkg_estimator(image)

        if self.aperture_radius is None:
            if hasattr(self.psf_model, 'fwhm'):
                self.aperture_radius = self.psf_model.fwhm.value
            elif hasattr(self.psf_model, 'sigma'):
                self.aperture_radius = (self.psf_model.sigma.value *
                                        gaussian_sigma_to_fwhm)

        if positions is None:
            if self.finder is None:
                raise ValueError('Finder cannot be None if positions are '
                                 'not given.')

            outtab = Table([[], [], [], [], [], []],
                           names=('id', 'group_id', 'iter_detected', 'x_fit',
                                  'y_fit', 'flux_fit'),
                           dtype=('i4', 'i4', 'i4', 'f8', 'f8', 'f8'))

            intab = Table([[], [], []],
                          names=('x_0', 'y_0', 'flux_0'),
                          dtype=('f8', 'f8', 'f8'))

            sources = self.finder(residual_image)

            apertures = CircularAperture(
                (sources['xcentroid'], sources['ycentroid']),
                r=self.aperture_radius)

            sources['aperture_flux'] = aperture_photometry(
                residual_image, apertures)['aperture_sum']
            n = 1
            while (len(sources) > 0
                   and (self.niters is None or n <= self.niters)):
                init_guess_tab = Table(names=['x_0', 'y_0', 'flux_0'],
                                       data=[
                                           sources['xcentroid'],
                                           sources['ycentroid'],
                                           sources['aperture_flux']
                                       ])
                intab = vstack([intab, init_guess_tab])
                star_groups = self.group_maker(init_guess_tab)
                result_tab, residual_image = self.nstar(
                    residual_image, star_groups)
                result_tab['iter_detected'] = \
                    n*np.ones(result_tab['x_fit'].shape, dtype=int)

                outtab = vstack([outtab, result_tab])
                sources = self.finder(residual_image)

                if len(sources) > 0:
                    apertures = CircularAperture(
                        (sources['xcentroid'], sources['ycentroid']),
                        r=self.aperture_radius)
                    sources['aperture_flux'] = \
                        aperture_photometry(residual_image,
                                            apertures)['aperture_sum']
                n += 1

            outtab = hstack([intab, outtab])
        else:
            if 'flux_0' not in positions.colnames:
                apertures = CircularAperture(
                    (positions['x_0'], positions['y_0']),
                    r=self.aperture_radius)

                positions['flux_0'] = aperture_photometry(
                    residual_image, apertures)['aperture_sum']

            intab = Table(
                names=['x_0', 'y_0', 'flux_0'],
                data=[positions['x_0'], positions['y_0'], positions['flux_0']])

            star_groups = self.group_maker(intab)
            outtab, residual_image = self.nstar(residual_image, star_groups)
            outtab = hstack([intab, outtab])

        return outtab, residual_image
Code example #44
0
# -------------------------------------------------------------------------------
# normalize spectra
# -------------------------------------------------------------------------------
if parallaxes_tgas:
    file_name = 'apogee_spectra_norm.pickle'
else:
    file_name = 'apogee_spectra_norm_hip.pickle'
destination = './data/' + file_name
if not os.path.isfile(destination):
    data_norm, continuum = LoadAndNormalizeData(apogee_data['FILE'],
                                                file_name,
                                                destination='./data/spectra/')

# -------------------------------------------------------------------------------
# save files!
# -------------------------------------------------------------------------------
if parallaxes_tgas:
    apogee_data = Table(apogee_data)
    tgas_data = Table(tgas_data)
    training_labels = hstack([apogee_data, tgas_data])

    f = open('data/training_labels_apogee_tgas.pickle', 'wb')  # pickle needs binary mode
    pickle.dump(training_labels, f)
    f.close()
else:
    f = open('data/training_labels_apogee_hip.pickle', 'wb')  # pickle needs binary mode
    pickle.dump(apogee_data, f)
    f.close()

# -------------------------------------------------------------------------------
Code example #45
0
def measure_specvels(spectra, profile, linewl, l2=0., viewwidth=20., corner=False):
    params = []
    if profile == 'two':
        nparams = 6
        figsize = (5, 15)
    elif profile == 'pcyg':
        nparams = 6
        figsize = (5, 15)
    elif profile == 'emis':
        nparams = 4
        figsize = (5, 10)
    elif profile == 'bc' or profile == 'rc':
        nparams = 5
        figsize = (5, 12.5)
    elif profile == 'pcygbc':
        nparams = 7
        figsize = (5, 15)
    elif profile == 'twobc':
        nparams = 7
        figsize = (5, 15)
    else:
        raise ValueError('unrecognized profile type')
    hr = [4] + [1] * nparams
    gs = GridSpec(nparams + 1, 2, height_ratios=hr)
    for spec in spectra:
        good = ~np.isnan(spec['flux'])
        wl = spec['wl'][good]
        flux = spec['flux'][good]
        yshown = flux[(wl > linewl - viewwidth) & (wl < linewl + viewwidth)]
        if len(yshown) == 0:
            print('out of range for', spec['filename'], '(phase = {:+.1f})'.format(spec['phase']))
            params.append([np.nan, np.nan, np.nan, np.nan, np.nan, np.nan])
            continue
        f = plt.figure(figsize=figsize)
        ax1 = plt.subplot(gs[0:2])
        ax1.step(wl, flux)
        ax1.axvline(linewl, color='k', linestyle='-.', zorder=0)
        ax1.axis([linewl - viewwidth, linewl + viewwidth, np.min(yshown), np.max(yshown)])
        ax1.xaxis.tick_top()
        ax1.xaxis.set_ticks_position('both')

        line = plt.Line2D([], [])
        ax1.add_artist(line)

        ax2 = [plt.subplot(gs[2])]
        ax3 = [plt.subplot(gs[3])]
        for row in range(2, nparams + 1):
            ax2.append(plt.subplot(gs[2 * row], sharex=ax2[0]))
            ax3.append(plt.subplot(gs[2 * row + 1], sharex=ax3[0]))
        for ax in ax3:
            ax.yaxis.tick_right()
            ax.yaxis.set_ticks_position('both')
        for ax in ax2[:-1] + ax3[:-1]:
            plt.setp(ax.get_xticklabels(), visible=False)
        plt.subplots_adjust(left=0.11, right=0.89, top=0.97, bottom=0.03, hspace=0, wspace=0)

        dr = LinePlot(line, wl, flux, ax2, ax3, profile, linewl, l2, corner)
        dr.connect()
        plt.show()

        # cont = input('Press enter to accept these values or r to reject them.\n')
        # plt.clf()
        # if dr.corner:
        #     plt.figure(1)
        #     plt.clf()
        # if cont == 'r':
        #     print('fit rejected')
        #     params.append([np.nan, np.nan, np.nan, np.nan, np.nan, np.nan])
        # else:
        params.append(dr.params)

    tparams = Table(rows=params, names=('v', 'dv', 'EW', 'dEW', 'flux', 'dflux'))
    for col in tparams.colnames:
        tparams[col].format = '%.3e'
    tout = hstack([spectra[['filename', 'date', 'telescope', 'instrument', 'phase']], tparams])
    tout[['filename', 'v', 'EW', 'flux']].pprint(max_width=-1, max_lines=-1)
    plot_results(tout)
    return tout
Code example #46
0
File: photometry.py Project: aboucaud/photutils
    def do_photometry(self, image, init_guesses=None):
        """
        Perform PSF photometry in ``image``.

        This method assumes that ``psf_model`` has centroids and flux
        parameters which will be fitted to the data provided in
        ``image``. A compound model, in fact a sum of ``psf_model``,
        will be fitted to groups of stars automatically identified by
        ``group_maker``. Also, ``image`` is not assumed to be background
        subtracted.  If ``init_guesses`` are not ``None`` then this method
        uses ``init_guesses`` as initial guesses for the centroids. If
        the centroid positions are set as ``fixed`` in the PSF model
        ``psf_model``, then the optimizer will only consider the flux as
        a variable.

        Parameters
        ----------
        image : 2D array-like, `~astropy.io.fits.ImageHDU`, `~astropy.io.fits.HDUList`
            Image to perform photometry.
        init_guesses: `~astropy.table.Table`
            Table which contains the initial guesses (estimates) for the set
            of parameters. Columns 'x_0' and 'y_0' which represent the
            positions (in pixel coordinates) for each object must be present.
            'flux_0' can also be provided to set initial fluxes.
            If 'flux_0' is not provided, aperture photometry is used to
            estimate initial values for the fluxes. Additional columns of the
            form '<parametername>_0' will be used to set the initial guess for
            any parameters of the ``psf_model`` model that are not fixed.

        Returns
        -------
        output_tab : `~astropy.table.Table` or None
            Table with the photometry results, i.e., centroids and
            fluxes estimations and the initial estimates used to start
            the fitting process. Uncertainties on the fitted parameters are
            reported as columns called ``<paramname>_unc`` provided that the
            fitter object contains a dictionary called ``fit_info`` with
            the key ``param_cov``, which contains the covariance matrix. If
            ``param_cov`` is not present, uncertainties are not reported.
        """

        if self.bkg_estimator is not None:
            image = image - self.bkg_estimator(image)

        if self.aperture_radius is None:
            if hasattr(self.psf_model, 'fwhm'):
                self.aperture_radius = self.psf_model.fwhm.value
            elif hasattr(self.psf_model, 'sigma'):
                self.aperture_radius = (self.psf_model.sigma.value *
                                        gaussian_sigma_to_fwhm)

        if init_guesses is not None:
            # make sure the code does not modify user's input
            init_guesses = init_guesses.copy()

            if self.aperture_radius is None:
                if 'flux_0' not in init_guesses.colnames:
                    raise ValueError(
                        'aperture_radius is None and could not be '
                        'determined by psf_model. Please either '
                        'provide a value for aperture_radius or '
                        'define fwhm/sigma at psf_model.')

            if self.finder is not None:
                warnings.warn(
                    'Both init_guesses and finder are different than '
                    'None, which is ambiguous. finder is going to '
                    'be ignored.', AstropyUserWarning)

            if 'flux_0' not in init_guesses.colnames:
                apertures = CircularAperture(
                    (init_guesses['x_0'], init_guesses['y_0']),
                    r=self.aperture_radius)

                init_guesses['flux_0'] = aperture_photometry(
                    image, apertures)['aperture_sum']
        else:
            if self.finder is None:
                raise ValueError('Finder cannot be None if init_guesses are '
                                 'not given.')
            sources = self.finder(image)
            if len(sources) > 0:
                apertures = CircularAperture(
                    (sources['xcentroid'], sources['ycentroid']),
                    r=self.aperture_radius)

                sources['aperture_flux'] = aperture_photometry(
                    image, apertures)['aperture_sum']

                init_guesses = Table(names=['x_0', 'y_0', 'flux_0'],
                                     data=[
                                         sources['xcentroid'],
                                         sources['ycentroid'],
                                         sources['aperture_flux']
                                     ])

        self._define_fit_param_names()
        for p0, param in self._pars_to_set.items():
            if p0 not in init_guesses.colnames:
                init_guesses[p0] = len(init_guesses) * [
                    getattr(self.psf_model, param).value
                ]

        star_groups = self.group_maker(init_guesses)
        output_tab, self._residual_image = self.nstar(image, star_groups)

        star_groups = star_groups.group_by('group_id')
        output_tab = hstack([star_groups, output_tab])

        return output_tab
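
A hedged usage sketch for the method above (this ``do_photometry`` matches
photutils' ``BasicPSFPhotometry``; the grid size, star position, and sigma
below are made-up values, and the API is the photutils 0.x/1.x one):

import numpy as np
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.table import Table
from photutils.psf import BasicPSFPhotometry, DAOGroup, IntegratedGaussianPRF

psf_model = IntegratedGaussianPRF(sigma=2.0)
phot = BasicPSFPhotometry(group_maker=DAOGroup(crit_separation=5.0),
                          bkg_estimator=None,
                          psf_model=psf_model,
                          fitshape=(11, 11),
                          fitter=LevMarLSQFitter())

# synthesise a single star on an empty 32x32 frame
yy, xx = np.mgrid[0:32, 0:32]
image = psf_model.evaluate(xx, yy, flux=500.0, x_0=15.0, y_0=17.0, sigma=2.0)

# rough centroid guesses; flux_0 is filled in by aperture photometry
init = Table({'x_0': [15.5], 'y_0': [16.5]})
result = phot.do_photometry(image, init_guesses=init)
print(result['x_fit', 'y_fit', 'flux_fit'])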
Code example #47
0
def match(cat1, cat2, path, write, properties=None):
    if not os.path.isfile(cat1):
        cat1 = cat1.replace(cat1.split('_')[-1],
                            'z0.20.fits')  # use as dummy to get columns
        HI = False

    else:
        HI = True

    cat_name_HI = cat1
    cat_name_cont = cat2

    cat_fits_HI = fits.open(cat_name_HI)
    cat_fits_cont = fits.open(cat_name_cont)

    cat_HI = cat_fits_HI[1].data
    cat_cont = cat_fits_cont[1].data

    cols_HI = cat_fits_HI[1].columns.names
    cols_cont = cat_fits_cont[1].columns.names

    cat_HI_table = Table.read(cat_name_HI, format='fits')
    cat_cont_table = Table.read(cat_name_cont)

    for i in range(len(cols_cont)):
        if cols_cont[i] in cols_HI:
            cols_cont[i] = cols_cont[i] + '_1'

    # how to convert a recarray or fits table to np array:
    cat_HI_np = np.array(cat_HI_table).view(np.float32).reshape(
        (np.array(cat_HI_table).shape + (-1, )))
    cat_cont_np = np.array(cat_cont_table).view(np.float32).reshape(
        (np.array(cat_cont_table).shape + (-1, )))

    print(cols_cont, cols_HI)

    if HI:
        print('cat lengths', cat1.split('/')[-1], len(cat_HI), len(cat_cont))

        MHI_HI = cat_HI['MHI']
        MH_HI = cat_HI['Mh']
        #r_HI_HI = cat_HI['HI size']
        line_flux_HI = cat_HI['HI flux'] / 1000  # from mJy to Jy
        incl_HI = cat_HI['inclination']
        z_HI = cat_HI['redshift']
        OptClass_HI = cat_HI['OptClass']

        MHI_cont = cat_cont['MHI_pred']
        MH_cont = cat_cont['Mh_1']
        #r_HI_cont = cat_cont['HI size']
        incl_cont = cat_cont['inclination_1']
        z_cont = cat_cont['redshift_1']
        mass_function = 0

        if mass_function:
            cont_optclasses = [
                MHI_cont[cat_cont['RadioClass'] == 1],
                MHI_cont[cat_cont['RadioClass'] > 3]
            ]
            labels = ['late-type', 'late-type + AGN']
            colors = ['red', 'pink']

            plt.clf()

            norm = False
            #plt.hist(MHI_HI,range = (7.5, 12), bins = 100,log=True, histtype='step', fill=False,label = 'MHI', alpha = 1, normed=norm)
            #plt.hist(cont_optclasses,range = (7.5, 12),stacked = True, histtype='step', fill=False,bins = 100, log=True, alpha = 1, normed=norm, color = colors, label = labels)

            plt.legend()
            plt.xlabel(r'log MHI (M$_{\odot}$)')
            plt.ylabel('N sources')
            plt.title(cat2.split('_')[-1].split('.fits')[0])
            plt.savefig('cross/plots/HI_number_counts%s.png' %
                        cat2.split('_')[-1].split('.fits')[0])
            return

        #work out line_flux_pred from MHI_pred and don't match any continuum sources with line_flux_pred below the line flux cut

        H = 67.0
        M = 0.32
        L = 0.68
        c = 2.99792458e8
        G = 6.67408e-11
        cosmo = LambdaCDM(H0=H, Om0=M, Ode0=L)
        D_L_cont = cosmo.luminosity_distance(z_cont).value  # Mpc

        line_flux_cont = 10**MHI_cont / (49.8 * D_L_cont**2)
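        # this inverts the usual HI mass-flux relation
        # M_HI [Msun] = 49.8 * D_L**2 * S_line (S_line in Jy Hz, D_L in Mpc);
        # 49.8 is the familiar 2.36e5 (Jy km/s) coefficient converted to
        # Jy Hz at the 1420 MHz rest frequency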

        print(MHI_cont)
        print(len(cat_cont), 'continuum sources')
        print(len(cat_cont[line_flux_cont >= line_flux_cut]),
              'continuum sources predict HI flux above HI cut')
        print(len(cat_cont[line_flux_cont < line_flux_cut]),
              'continuum sources will not be matched with HI')
        print(len(cat_HI), 'HI sources')
        print(
            len(cat_HI) - len(cat_cont[line_flux_cont >= line_flux_cut]),
            'HI sources will not be matched with continuum')
        print(
            len(cat_cont) + len(cat_HI) -
            len(cat_cont[line_flux_cont >= line_flux_cut]),
            'unique sources in catalogue')

        unmatched_cont = cat_cont_np[line_flux_cont < line_flux_cut]

        unmatched_cont_empty = np.zeros(
            (unmatched_cont.shape[0], cat_HI_np.shape[1])) - 100

        unmatched_cont_stack = np.hstack(
            (unmatched_cont_empty, unmatched_cont))

        matched_cat_cont_np = cat_cont_np[line_flux_cont >= line_flux_cut]

        # find lowest N MHI sources in HI cat, where N is the number of surplus HI sources after matching
        # with all continuum sources with predicted flux over HI flux threshold

        N_unmatched_HI = len(cat_HI) - len(
            cat_cont[line_flux_cont >= line_flux_cut])
        print('N_unmatched_HI', N_unmatched_HI)
        # catch values less than zero
        N_unmatched_HI = np.max((N_unmatched_HI, 0))
        print('N_unmatched_HI', N_unmatched_HI)

        print(line_flux_cont.shape)
        print(line_flux_HI.shape)
        # value of MHI_HI of Nth lowest source after sorting in order of MHI_HI
        sorted_line_flux_HI = np.sort(line_flux_HI)
        HI_cat_line_flux_cut = sorted_line_flux_HI[N_unmatched_HI]
        print('all HI sources with line flux below', HI_cat_line_flux_cut,
              'Jy will not be matched')

        unmatched_HI = cat_HI_np[line_flux_HI < HI_cat_line_flux_cut]

        unmatched_HI_empty = np.zeros(
            (unmatched_HI.shape[0], cat_cont_np.shape[1])) - 100

        unmatched_HI_stack = np.hstack((unmatched_HI, unmatched_HI_empty))

        matched_cat_HI_np = cat_HI_np[line_flux_HI >= HI_cat_line_flux_cut]

        all_cols = cols_HI + cols_cont

        unmatched_HI_table = Table()
        for i, col in enumerate(all_cols):
            unmatched_HI_table[col] = unmatched_HI_stack[:, i]

        unmatched_cont_table = Table()
        for i, col in enumerate(all_cols):
            unmatched_cont_table[col] = unmatched_cont_stack[:, i]

        matched_MHI_HI = MHI_HI[line_flux_HI >= HI_cat_line_flux_cut]
        matched_MHI_cont = MHI_cont[line_flux_cont >= line_flux_cut]
        print(matched_MHI_HI.shape, matched_MHI_cont.shape)

        sorted_matched_MHI_HI = matched_MHI_HI[np.argsort(matched_MHI_HI)]

        sorted_matched_MHI_cont = matched_MHI_cont[np.argsort(
            matched_MHI_cont)]
        for i in range(len(sorted_matched_MHI_HI)):
            print(sorted_matched_MHI_HI[i], sorted_matched_MHI_cont[i])

        print(sorted_matched_MHI_cont.shape)
        print(sorted_matched_MHI_HI.shape)
        both = np.vstack((sorted_matched_MHI_HI, sorted_matched_MHI_cont))
        print(both)

        matched_cat_HI_np = matched_cat_HI_np[np.argsort(matched_MHI_HI)]
        newcat2 = matched_cat_cont_np[np.argsort(matched_MHI_cont)]

        # now only need to match the matched catalogues, and reserve the unmatched to stack at the end

        matching_cat_HI_table = Table()
        for i, col in enumerate(cols_HI):
            matching_cat_HI_table[col] = matched_cat_HI_np[:, i]

    elif not HI:
        # make a numpy array for now

        newcat1 = np.zeros((cat_cont_np.shape[0], cat_HI_np.shape[1])) - 100
        matching_cat_HI_table = Table()
        for i, col in enumerate(cols_HI):
            matching_cat_HI_table[col] = newcat1[:, i]

        newcat2 = cat_cont_np

    # might need to make HI table from np array here as it is reordered

    # make it into a fits table
    cat = Table()
    for i, col in enumerate(cols_cont):
        cat[col] = newcat2[:, i]

    t_new = hstack([matching_cat_HI_table, cat])

    plt.clf()

    plt.scatter(t_new[t_new['OptClass_1'] == 2]['MHI'],
                t_new[t_new['OptClass_1'] == 2]['MHI_pred'],
                label='spirals')
    plt.scatter(t_new[t_new['OptClass_1'] == 1]['MHI'],
                t_new[t_new['OptClass_1'] == 1]['MHI_pred'],
                label='ellipticals')
    plt.xlabel(r'log MHI (M$_{\odot}$)')
    plt.ylabel(r'log MHI_pred (M$_{\odot}$)')
    plt.legend()
    plt.savefig(path + 'plots/MHI_pred_vs_MHI%s.png' %
                cat2.split('_')[-1].split('.fits')[0])

    plt.clf()

    plt.scatter(t_new[t_new['OptClass_1'] == 2]['MHI'],
                t_new[t_new['OptClass_1'] == 2]['MHI'] -
                t_new[t_new['OptClass_1'] == 2]['MHI_pred'],
                label='spirals')
    plt.scatter(t_new[t_new['OptClass_1'] == 1]['MHI'],
                t_new[t_new['OptClass_1'] == 1]['MHI'] -
                t_new[t_new['OptClass_1'] == 1]['MHI_pred'],
                label='ellipticals')
    plt.xlabel(r'log MHI (M$_{\odot}$)')
    plt.ylabel(r'log MHI - log MHI_pred (M$_{\odot}$)')
    plt.legend()
    plt.savefig(path + 'plots/MHI_pred_vs_MHI_res%s.png' %
                cat2.split('_')[-1].split('.fits')[0])

    if HI:

        # vstack the unmatched here
        t_new_all = vstack([t_new, unmatched_HI_table, unmatched_cont_table])
    else:
        t_new_all = t_new

    plot_MHI_dist = 0
    if plot_MHI_dist:
        plt.clf()
        plt.hist(t_new[t_new['OptClass_1'] == 2]['MHI_pred'],
                 alpha=0.5,
                 label='spirals')
        plt.hist(t_new[t_new['OptClass_1'] == 1]['MHI_pred'],
                 alpha=0.5,
                 label='ellipticals')
        plt.xlabel(r'log MHI_pred (M$_{\odot}$)')
        plt.ylabel('N')
        plt.legend()
        plt.savefig('MHI_pred%s.png' % cat2.split('_')[-1].split('.fits')[0])
        plt.clf()

        plt.hist(t_new[(t_new['RadioClass'] == 1)]['MHI_pred'],
                 alpha=0.5,
                 label='SFG-late')
        plt.hist(t_new[(t_new['RadioClass'] == 2)]['MHI_pred'],
                 alpha=0.5,
                 label='SFG-early')
        plt.hist(t_new[(t_new['RadioClass'] > 3)]['MHI_pred'],
                 alpha=0.5,
                 label='AGN')
        plt.xlabel(r'log MHI_pred (M$_{\odot}$)')
        plt.ylabel('N')
        plt.legend()
        plt.savefig('MHI_pred_radioclass%s.png' %
                    cat2.split('_')[-1].split('.fits')[0])
        plt.clf()

    outf = (path + cat2.split('/')[-1].replace("continuum", "X"))
    print('writing to.. ', outf)
    if write:
        t_new_all.write(outf, format='fits', overwrite=True)
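The branch above pads unmatched rows with a sentinel value (-100) so that the HI and continuum halves keep identical column layouts before stacking. A minimal sketch of the same pad-and-stack idea, with hypothetical two-column tables (the names are illustrative, not the catalogue columns above):

import numpy as np
from astropy.table import Table, hstack, vstack

# two matched tables with the same number of rows
hi = Table({'MHI': [9.1, 9.7]})
cont = Table({'flux': [1.2, 3.4]})
matched = hstack([hi, cont])

# an unmatched HI row: pad the missing continuum side with the sentinel
unmatched = hstack([Table({'MHI': [8.5]}), Table({'flux': [-100.0]})])

# stack matched and unmatched rows into one catalogue
full = vstack([matched, unmatched])
print(full)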
コード例 #48
0
ファイル: db.py プロジェクト: richardgmcmahon/OM10
    def paint(self,
              Nmax=None,
              verbose=False,
              lrg_input_cat='$OM10_DIR/data/LRGo.txt',
              qso_input_cat='$OM10_DIR/data/QSOo.txt',
              synthetic=False):
        """
        Add new columns to the table, for the magnitudes in various filters.

        Parameters
        ----------
        synthetic : boolean
            Use `lenspop` to make synthetic magnitudes in various filters
        target : string
            Paint lenses ('lens') or sources ('source')
        lrg_input_cat : string
            Name of LRG catalog, if not using synthetic paint
        qso_input_cat : string
            Name of QSO catalog, if not using synthetic paint
        verbose : boolean
           print progress to stdout

        Notes
        -----
        * Synthetic painting is very slow, as we loop over each object.
        * The treatment of QSOs may be flawed: the offset calculation has not
          been tested.

        """

        if not synthetic:
            # read data from SDSS
            f = open(os.path.expandvars(lrg_input_cat), 'r')
            lrg = loadtxt(f)
            f.close()
            g = open(os.path.expandvars(qso_input_cat), 'r')
            qso = loadtxt(g)
            g.close()

            ###MY OWN REDSHIFT ONLY MATCHING HERE:

            lens_props = ['MAGG_LENS','MAGR_LENS','MAGI_LENS','MAGZ_LENS', \
            'MAGW1_LENS','MAGW2_LENS','MAGW3_LENS','MAGW4_LENS', 'SDSS_FLAG_LENS']

            src_props = ['MAGG_SRC','MAGR_SRC','MAGI_SRC','MAGZ_SRC', \
            'MAGW1_SRC','MAGW2_SRC','MAGW3_SRC','MAGW4_SRC', 'SDSS_FLAG_SRC']

            tmp_lens = Table(np.zeros((len(self.sample), len(lens_props)),
                                      dtype='f8'),
                             names=lens_props)
            tmp_src = Table(np.zeros((len(self.sample), len(src_props)),
                                     dtype='f8'),
                            names=src_props)

            if verbose: print('setup done')

            lrg_sort = lrg[np.argsort(lrg[:, 0]), :]
            qso_sort = qso[np.argsort(qso[:, 0]), :]
            lens_count = 0

            for lens in self.sample:

                #paint lens
                ind = np.searchsorted(lrg_sort[:, 0], lens['ZLENS'])
                if ind >= len(lrg_sort): ind = len(lrg_sort) - 1
                tmp_lens[lens_count] = lrg_sort[ind, 6:] - lrg_sort[
                    ind, 8] + lens['APMAG_I']  #assign colors, not mags
                #paint source
                qso_ind = np.searchsorted(qso_sort[:, 0], lens['ZSRC'])
                if qso_ind >= len(qso_sort): qso_ind = len(qso_sort) - 1
                tmp_src[lens_count] = qso_sort[qso_ind,
                                               1:] - qso_sort[qso_ind,
                                                              3] + lens['MAGI']

                lens_count += 1

            self.sample = hstack([self.sample, tmp_lens, tmp_src])

        if synthetic:
            lens_count = 0
            total = len(self.sample)
            Rfilter = tools.filterfromfile('r_SDSS')
            Ufilter = tools.filterfromfile('u_SDSS')
            # sort the Ufilter array
            Ufilterarg = np.sort(Ufilter[1])
            Ufilter = (Ufilter[0], Ufilterarg, 1)
            Gfilter = tools.filterfromfile('g_SDSS')
            Ifilter = tools.filterfromfile('i_SDSS')
            Zfilter = tools.filterfromfile('z_SDSS')
            self.Nlenses = len(self.sample)
            bands = ('r_SDSS_lens', 'g_SDSS_lens', 'i_SDSS_lens',
                     'z_SDSS_lens', 'u_SDSS_lens', 'r_SDSS_quasar',
                     'g_SDSS_quasar', 'i_SDSS_quasar', 'z_SDSS_quasar',
                     'u_SDSS_quasar')
            if verbose:
                print(
                    'OM10: computing synthetic magnitudes in the following bands: ',
                    bands)
            # call a distance class constructor
            d = distances.Distance()
            # number of entries in the table of calculated magnitudes
            totalEntries = self.Nlenses * 10
            t = Table(np.arange(totalEntries).reshape(self.Nlenses, 10),
                      names=bands)
            Lsed = tools.getSED('BC_Z=1.0_age=9.000gyr')
            Qsed = tools.getSED('agn')
            from astropy.cosmology import FlatLambdaCDM
            cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
            lenspop_constructor = population_functions.LensPopulation_()
            for lens in self.sample:
                # calculate the quasar magnitude
                redshift = lens['ZSRC']
                RF_Imag_app_q = lens['MAGI_IN']
                Qoffset = RF_Imag_app_q - tools.ABFilterMagnitude(
                    Ifilter, Qsed, redshift)
                RF_Rmag_app_q = tools.ABFilterMagnitude(
                    Rfilter, Qsed, redshift) + Qoffset
                RF_Gmag_app_q = tools.ABFilterMagnitude(
                    Gfilter, Qsed, redshift) + Qoffset
                RF_Zmag_app_q = tools.ABFilterMagnitude(
                    Zfilter, Qsed, redshift) + Qoffset
                if (redshift < 3.9):
                    RF_Umag_app_q = tools.ABFilterMagnitude(
                        Ufilter, Qsed, redshift) + Qoffset
                elif (redshift >= 3.9):
                    RF_Umag_app_q = 99
                # calculate the lens magnitude
                veldisp = np.atleast_1d(lens['VELDISP'])
                redshift = lens['ZLENS']
                # Reference Frame Absolute R magnitude
                RF_RMag_abs, _ = lenspop_constructor.EarlyTypeRelations(
                    veldisp)
                RMag_abs = tools.ABFilterMagnitude(Rfilter, Lsed, redshift)
                distMod = cosmo.distmod(redshift).value
                Rmag_app = RMag_abs + distMod
                offset_abs_app = RMag_abs - Rmag_app
                offset_RF_abs = RF_RMag_abs - RMag_abs
                RF_Rmag_app = RF_RMag_abs - offset_abs_app
                # Get filters and calculate magnitudes for each filter:
                RF_Umag_app = tools.ABFilterMagnitude(
                    Ufilter, Lsed, redshift) + offset_RF_abs + distMod
                RF_Gmag_app = tools.ABFilterMagnitude(
                    Gfilter, Lsed, redshift) + offset_RF_abs + distMod
                RF_Imag_app = tools.ABFilterMagnitude(
                    Ifilter, Lsed, redshift) + offset_RF_abs + distMod
                RF_Zmag_app = tools.ABFilterMagnitude(
                    Zfilter, Lsed, redshift) + offset_RF_abs + distMod
                t['u_SDSS_lens'][lens_count] = RF_Umag_app
                t['r_SDSS_lens'][lens_count] = RF_Rmag_app
                t['g_SDSS_lens'][lens_count] = RF_Gmag_app
                t['i_SDSS_lens'][lens_count] = RF_Imag_app
                t['z_SDSS_lens'][lens_count] = RF_Zmag_app
                t['u_SDSS_quasar'][lens_count] = RF_Umag_app_q
                t['r_SDSS_quasar'][lens_count] = RF_Rmag_app_q
                t['g_SDSS_quasar'][lens_count] = RF_Gmag_app_q
                t['i_SDSS_quasar'][lens_count] = RF_Imag_app_q
                t['z_SDSS_quasar'][lens_count] = RF_Zmag_app_q
                lens_count = lens_count + 1
                dot = np.mod(lens_count, total / np.min([79, total])) == 0
                if verbose and dot:
                    print('.', end="")

            # Update the sample by adding the table of calculated magnitude
            self.sample.add_columns(t.columns.values())
            self.lenses = self.sample.copy()

        return
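The non-synthetic branch paints magnitudes by nearest-redshift lookup: np.searchsorted finds where each lens redshift falls in the redshift-sorted SDSS table, and colours are transferred relative to a fixed i-band anchor. The lookup step in isolation, with made-up arrays:

import numpy as np

cat_z = np.array([0.1, 0.3, 0.5, 0.9])     # sorted catalogue redshifts
cat_mag = np.array([18.0, 18.5, 19.0, 20.0])

z_lens = 0.42
ind = np.searchsorted(cat_z, z_lens)
ind = min(ind, len(cat_z) - 1)             # clamp the index, as in the loop above
print(cat_mag[ind])                        # magnitude of the entry at the insertion point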
コード例 #49
0
 def time_hstack(self):
     hstack([self.table, self.other_table_2])
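This is an asv-style benchmark method; the tables it stacks come from a setup step that is not shown. A plausible setup, assuming two column-disjoint tables of equal length (the sizes are arbitrary):

import numpy as np
from astropy.table import Table, hstack

class TimeHstack:
    def setup(self):
        n = 100000
        self.table = Table({'a': np.arange(n), 'b': np.random.rand(n)})
        self.other_table_2 = Table({'c': np.arange(n), 'd': np.random.rand(n)})

    def time_hstack(self):
        hstack([self.table, self.other_table_2])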
コード例 #50
0
ファイル: source_catalog.py プロジェクト: djfarrow/HETDEX_API
def main(argv=None):
    """ Main Function """

    parser = get_parser()
    args = parser.parse_args(argv)

    print('Combining catalogs')
    global source_table
    print(args.dsky, args.version)

    source_table = create_source_catalog(version=args.version, dsky=args.dsky)
    #    source_table.write('source_cat_tmp.fits', overwrite=True)
    #    source_table = Table.read('source_cat_tmp.fits')
    # add source name
    source_name = []
    for row in source_table:
        source_name.append(get_source_name(row["ra_mean"], row["dec_mean"]))
    try:
        source_table.add_column(source_name, name="source_name", index=1)
    except Exception:
        print('messed up source name again')

    # match to SDSS
    source_table_coords = SkyCoord(source_table['ra_mean'],
                                   source_table['dec_mean'],
                                   unit='deg')
    sdssfile = op.join(config.imaging_dir, 'sdss', 'specObj-dr16-trim.fits')
    sdsstab = Table.read(sdssfile)
    sdss_coords = SkyCoord(ra=sdsstab['PLUG_RA'],
                           dec=sdsstab['PLUG_DEC'],
                           unit='deg')
    idx, d2d, d3d = source_table_coords.match_to_catalog_sky(sdss_coords)

    catalog_matches = sdsstab[idx]
    catalog_matches['sdss_dist'] = d2d.to_value(u.arcsec)

    catalog_matches.rename_column('PLUG_RA', 'ra_sdss')
    catalog_matches.rename_column('PLUG_DEC', 'dec_sdss')
    catalog_matches.rename_column('CLASS', 'sdss_class')
    catalog_matches.rename_column('Z', 'z_sdss')
    catalog_matches.rename_column('Z_ERR', 'z_sdss_err')

    matched_catalog = hstack([source_table, catalog_matches])

    sel_remove = matched_catalog['sdss_dist'] > 5

    matched_catalog['ra_sdss'][sel_remove] = np.nan
    matched_catalog['dec_sdss'][sel_remove] = np.nan
    matched_catalog['sdss_dist'][sel_remove] = np.nan
    matched_catalog['sdss_class'][sel_remove] = ''
    matched_catalog['z_sdss'][sel_remove] = np.nan
    matched_catalog['z_sdss_err'][sel_remove] = np.nan

    source_table = matched_catalog

    # match band-merged WISE catalog

    wise_catalog = Table.read('wise-hetdexoverlap.fits')
    source_table_coords = SkyCoord(source_table['ra_mean'],
                                   source_table['dec_mean'],
                                   unit='deg')
    wise_coords = SkyCoord(ra=wise_catalog['ra'],
                           dec=wise_catalog['dec'],
                           unit='deg')
    idx, d2d, d3d = source_table_coords.match_to_catalog_sky(wise_coords)

    catalog_matches = wise_catalog[idx]
    catalog_matches['wise_dist'] = d2d.to_value(u.arcsec)

    keep_wise = catalog_matches['ra', 'dec', 'primary', 'unwise_objid', 'flux',
                                'wise_dist']
    keep_wise.rename_column('flux', 'wise_fluxes')
    keep_wise.rename_column('ra', 'ra_wise')
    keep_wise.rename_column('dec', 'dec_wise')

    matched_catalog = hstack([source_table, keep_wise])

    w1 = []
    w2 = []
    for row in matched_catalog:
        w1.append(row['wise_fluxes'][0])
        w2.append(row['wise_fluxes'][1])

    matched_catalog['flux_w1'] = w1
    matched_catalog['flux_w2'] = w2
    matched_catalog.remove_column('wise_fluxes')
    sel_close = matched_catalog['wise_dist'] < 5  #arcsec

    print('There are {} wise matches'.format(
        np.size(np.unique(matched_catalog['source_id'][sel_close]))))
    # remove column info for WISE matches more than 5 arcsec away

    sel_remove = matched_catalog['wise_dist'] > 5  #arcsec

    matched_catalog['ra_wise'][sel_remove] = np.nan
    matched_catalog['dec_wise'][sel_remove] = np.nan
    matched_catalog['wise_dist'][sel_remove] = np.nan
    matched_catalog['primary'][sel_remove] = -1
    matched_catalog['unwise_objid'][sel_remove] = np.nan
    matched_catalog['flux_w1'][sel_remove] = np.nan
    matched_catalog['flux_w2'][sel_remove] = np.nan

    source_table = matched_catalog

    # add z_spec from other catalogs if it exists:
    goods_z = Table.read('catalogs/goods_n_specz_1018_no_header.txt',
                         names=[
                             'ra_zspec', 'dec_zspec', 'zspec', 'z_quality',
                             'zspec_catalog', 'Symbol'
                         ],
                         format='ascii.no_header')

    #DEIMOS 10k (Hasinger et al. 2018) z_spec up to ~6
    #https://cosmos.astro.caltech.edu/news/65
    deimos = Table.read('catalogs/deimos_redshifts.tbl', format='ascii')
    deimos.rename_column('Ra', 'ra_zspec')
    deimos.rename_column('Dec', 'dec_zspec')
    deimos['zspec_catalog'] = 'CosmosDeimos'

    #Kriek et al. (2015)
    #http://mosdef.astro.berkeley.edu/for-scientists/data-releases/
    mosdef = Table.read('catalogs/mosdef_zcat.final_slitap.fits')
    mosdef.rename_column('RA', 'ra_zspec')
    mosdef.rename_column('DEC', 'dec_zspec')
    mosdef.rename_column('Z_MOSFIRE', 'zspec')
    mosdef['zspec_catalog'] = 'MOSFIRE'

    #VUDS (Tasca et al. 2017), z_spec up to ~6
    #http://cesam.lam.fr/vuds/DR1/
    zcosbright = Table.read(
        'catalogs/cesam_zcosbrightspec20k_dr3_catalog_1616073679.txt',
        format='ascii')
    zcosbright.rename_column('zpec', 'zspec')
    zcosbright.rename_column('ra', 'ra_zspec')
    zcosbright.rename_column('dec', 'dec_zspec')
    zcosbright['zspec_catalog'] = 'zCosmosBright'

    deep_specz = Table.read(
        'catalogs/DEEP_zcosmos_spectroscopy_one_v2.6_data.cat',
        format='ascii',
        data_start=100)
    deep_specz.rename_column('col1', 'zCOSMOS-deepID')
    deep_specz.rename_column('col2', 'zspec')
    deep_specz.rename_column('col3', 'flag')
    deep_specz.rename_column('col4', 'zphot')
    deep_specz.rename_column('col5', 'ra_zspec')
    deep_specz.rename_column('col6', 'dec_zspec')
    deep_specz['zspec_catalog'] = 'DEEP_zcosmos'

    sdssfile = op.join(config.imaging_dir, 'sdss', 'specObj-dr16-trim.fits')

    sdss_specz = Table.read(sdssfile)
    sdss_specz.rename_column('PLUG_RA', 'ra_zspec')
    sdss_specz.rename_column('PLUG_DEC', 'dec_zspec')
    sdss_specz.rename_column('CLASS', 'sdss_class')
    sdss_specz.rename_column('Z', 'zspec')
    sdss_specz.rename_column('Z_ERR', 'z_sdss_err')
    sdss_specz['zspec_catalog'] = 'sdss-dr16'

    specz_catalogs = vstack([
        goods_z['zspec', 'ra_zspec', 'dec_zspec', 'zspec_catalog'],
        deimos['zspec', 'ra_zspec', 'dec_zspec', 'zspec_catalog'],
        mosdef['zspec', 'ra_zspec', 'dec_zspec', 'zspec_catalog'],
        zcosbright['zspec', 'ra_zspec', 'dec_zspec', 'zspec_catalog'],
        deep_specz['zspec', 'ra_zspec', 'dec_zspec', 'zspec_catalog'],
        sdss_specz['zspec', 'ra_zspec', 'dec_zspec', 'zspec_catalog'],
    ])
    sel = specz_catalogs['zspec'] >= 0
    specz_catalogs = specz_catalogs[sel]

    specz_coords = SkyCoord(ra=specz_catalogs['ra_zspec'],
                            dec=specz_catalogs['dec_zspec'],
                            unit='deg')
    source_coords = SkyCoord(ra=source_table['ra'],
                             dec=source_table['dec'],
                             unit='deg')

    idx, d2d, d3d = source_coords.match_to_catalog_sky(specz_coords)

    catalog_matches = specz_catalogs[idx]
    catalog_matches['zspec_dist'] = d2d.to_value(u.arcsec)

    matched_catalog = hstack([source_table, catalog_matches])

    sel_close = matched_catalog['zspec_dist'] < 5  #u.arcsec

    print('There are {} zspec matches within 5 arcsec'.format(
        np.size(np.unique(matched_catalog['source_id'][sel_close]))))

    sel_remove = matched_catalog['zspec_dist'] > 5  #u.arcsec

    matched_catalog['zspec'][sel_remove] = np.nan
    matched_catalog['ra_zspec'][sel_remove] = np.nan
    matched_catalog['dec_zspec'][sel_remove] = np.nan
    matched_catalog['zspec_dist'][sel_remove] = np.nan
    matched_catalog['zspec_catalog'][sel_remove] = ''

    #add desi confirmed redshifts

    dtab = Table.read('catalogs/desi-hetdex.fits')

    sel_good_hetdex = (dtab['artifact_flag'] == 0) * (dtab['wave'] >= 3610)
    sel_good_desi = (dtab['FIBERSTATUS'] == 0)
    sel_sample = sel_good_desi * sel_good_hetdex
    sel_conf = dtab['VI_quality'] >= 1

    hetdex_coords = SkyCoord(ra=dtab['ra'], dec=dtab['dec'], unit='deg')
    desi_coords = SkyCoord(ra=dtab['TARGET_RA'],
                           dec=dtab['TARGET_DEC'],
                           unit='deg')
    dtab['zspec_dist'] = hetdex_coords.separation(desi_coords).arcsec

    zspec = []
    zspec_dist = []

    desi_matches = dtab['detectid'][sel_sample * sel_conf]
    for row in dtab[sel_sample * sel_conf]:
        if row['VI_z'] > 1.9:
            wave_z = (1 + row['VI_z']) * wavelya
            if (np.abs(wave_z - row['wave']) < 10):
                zspec.append(row['VI_z'])
                zspec_dist.append(row['zspec_dist'])
            else:
                zspec.append(np.nan)
                zspec_dist.append(np.nan)

        elif row['VI_z'] < 0.5:
            wave_z = (1 + row['VI_z']) * waveoii
            if (np.abs(wave_z - row['wave']) < 10):
                zspec.append(row['VI_z'])
                zspec_dist.append(row['zspec_dist'])
            else:
                zspec.append(np.nan)
                zspec_dist.append(np.nan)
        else:
            zspec.append(np.nan)
            zspec_dist.append(np.nan)

    for i in np.arange(len(desi_matches)):
        sel_det = matched_catalog['detectid'] == desi_matches[i]
        matched_catalog['zspec'][sel_det] = zspec[i]
        matched_catalog['zspec_dist'][sel_det] = zspec_dist[i]
        matched_catalog['zspec_catalog'][sel_det] = 'DESI'

    source_table = matched_catalog

    # Clear up memory

    for name in dir():
        if source_table:
            continue
        elif not name.startswith('_'):
            del name

    import gc
    gc.collect()

    # sort table closest to group mean position
    # so unique will produce the closest match

    src_coord = SkyCoord(ra=source_table['ra_mean'],
                         dec=source_table['dec_mean'],
                         unit='deg')
    det_coord = SkyCoord(ra=source_table['ra'],
                         dec=source_table['dec'],
                         unit='deg')

    source_table['src_separation'] = det_coord.separation(src_coord)
    source_table.sort(['src_separation'])

    print('Filling masked values with NaNs')

    for col in source_table.columns:
        try:
            source_table[col] = source_table[col].filled(np.nan)
            print('yes', col)
        except Exception:
            pass
            #print('no', col)
    #remove nonsense metadata
    source_table.meta = {}
    source_table.write("source_catalog_{}.fits".format(args.version),
                       overwrite=True)
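Every external catalogue above is attached the same way: match_to_catalog_sky finds the nearest neighbour for each source, hstack glues the matched rows on, and matches beyond 5 arcsec are blanked with NaN. The pattern condensed, with toy coordinates:

import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.table import Table, hstack

src = Table({'ra': [10.0, 20.0], 'dec': [0.0, 1.0]})
ext = Table({'ra': [10.0001, 50.0], 'dec': [0.0001, 5.0], 'z': [0.3, 1.1]})

src_coords = SkyCoord(src['ra'], src['dec'], unit='deg')
ext_coords = SkyCoord(ext['ra'], ext['dec'], unit='deg')
idx, d2d, d3d = src_coords.match_to_catalog_sky(ext_coords)

matches = ext[idx]
matches['dist'] = d2d.to_value(u.arcsec)
combined = hstack([src, matches], table_names=['src', 'ext'])

# blank out nearest neighbours that are too far away to be real matches
sel_remove = combined['dist'] > 5
combined['z'][sel_remove] = np.nan
print(combined)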
コード例 #51
0
ファイル: photometry.py プロジェクト: aboucaud/photutils
    def _do_photometry(self, param_tab, n_start=1):
        """
        Helper function which performs the iterations of the photometry process.

        Parameters
        ----------
        param_tab : `~astropy.table.Table`
            Table whose columns hold the initial guesses.
            For example, ['x_0', 'y_0', 'flux_0'], for initial guesses on the
            center positions and the flux.
        n_start : int
            Integer representing the start index of the iteration.
            It is 1 if init_guesses are None, and 2 otherwise.

        Returns
        -------
        output_table : `~astropy.table.Table` or None
            Table with the photometry results, i.e., the centroid and
            flux estimates together with the initial estimates used to
            start the fitting process.
        """

        output_table = Table()
        self._define_fit_param_names()

        for (init_param_name,
             fit_param_name) in zip(self._pars_to_set.keys(),
                                    self._pars_to_output.keys()):
            output_table.add_column(Column(name=init_param_name))
            output_table.add_column(Column(name=fit_param_name))

        sources = self.finder(self._residual_image)

        n = n_start
        while (len(sources) > 0 and (self.niters is None or n <= self.niters)):
            apertures = CircularAperture(
                (sources['xcentroid'], sources['ycentroid']),
                r=self.aperture_radius)
            sources['aperture_flux'] = aperture_photometry(
                self._residual_image, apertures)['aperture_sum']

            init_guess_tab = Table(names=['id', 'x_0', 'y_0', 'flux_0'],
                                   data=[
                                       sources['id'], sources['xcentroid'],
                                       sources['ycentroid'],
                                       sources['aperture_flux']
                                   ])

            for param_tab_name, param_name in self._pars_to_set.items():
                if param_tab_name not in (['x_0', 'y_0', 'flux_0']):
                    init_guess_tab.add_column(
                        Column(name=param_tab_name,
                               data=getattr(self.psf_model, param_name) *
                               np.ones(len(sources))))

            star_groups = self.group_maker(init_guess_tab)
            table, self._residual_image = super(
                IterativelySubtractedPSFPhotometry,
                self).nstar(self._residual_image, star_groups)

            star_groups = star_groups.group_by('group_id')
            table = hstack([star_groups, table])

            table['iter_detected'] = n * np.ones(table['x_fit'].shape,
                                                 dtype=np.int32)

            output_table = vstack([output_table, table])
            sources = self.finder(self._residual_image)
            n += 1

        return output_table
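Inside the loop, hstack pairs each star's group bookkeeping with its fitted parameters row by row, and vstack accumulates the iterations into one results table. The bookkeeping reduced to a toy version, independent of photutils:

import numpy as np
from astropy.table import Table, hstack, vstack

output = None
for n in (1, 2):  # two mock detect-then-fit iterations
    star_groups = Table({'group_id': [1, 2]})
    fit_results = Table({'x_fit': np.random.rand(2)})
    table = hstack([star_groups, fit_results])
    table['iter_detected'] = n * np.ones(len(table), dtype=np.int32)
    output = table if output is None else vstack([output, table])

print(output)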
コード例 #52
0
            # make new file
            stars.to_csv(output_stars, mode='w', index=False)
            stars_counter += 1
        else:
            # append to existing file
            stars.to_csv(output_stars, mode='a', index=False, header=False)

    new_metadata_columns = Table({
        'file_name': [file_name],
        'zooniverse_subject_id': [subject_id],
        'number_galaxy_centers': [len(centers)],
        'number_foreground_stars': [len(stars)],
        'observed': [manga_id in observed_ids]
    })

    new_metadata = hstack([new_metadata_columns, metadata])
    new_metadata = new_metadata.to_pandas()

    if metadata_counter == 0:
        # make new file
        new_metadata.to_csv(output_metadata, mode='w', index=False)
        metadata_counter += 1
    else:
        # append to existing file
        new_metadata.to_csv(output_metadata,
                            mode='a',
                            index=False,
                            header=False)

    # strip user_name and user_id
    for hdu_index in [8, 9, 10]:
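The fragment mixes astropy and pandas: a one-row Table of new columns is hstacked onto the existing metadata row, converted with to_pandas, and appended to a CSV. That append pattern in a self-contained form (file name and columns are illustrative):

from astropy.table import Table, hstack

metadata = Table({'project': ['demo']})
new_cols = Table({'file_name': ['cube_001.fits'],
                  'number_galaxy_centers': [1]})

row = hstack([new_cols, metadata]).to_pandas()
row.to_csv('metadata.csv', mode='w', index=False)                 # first row: write header
row.to_csv('metadata.csv', mode='a', index=False, header=False)   # later rows: append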
コード例 #53
0
    if len(r_ness) % 5000 == 0:
        print(len(r_ness))

r_ness = Column(r_ness, name='richness')
rand.add_column(r_ness)
rand.write(indir + '5_random_catalog_richness_' + str(aper) + '.list',
           format='ascii',
           overwrite=True)

#%%
""" CALCULATING RATIO OF GOOD-TOTAL PIXELS AROUND EACH RANDOM SIGNPOST """

rand = Table.read(indir + '5_random_catalog_richness_' + str(aper) + '.list',
                  format='ascii')
pixel_dict = gpf(rand, mask_image, aper)  # ~50 minutes

#%%
"""Constructing New Table of Random Points with Upscaled Richness """

pix_tab = Table(np.column_stack(
    [pixel_dict['tpix'], pixel_dict['gpix'], pixel_dict['gpf']]),
                names=('tot_pix', 'good_pix', 'gpf'))
rand_new = hstack([rand, pix_tab])
rand_upscaled = upscaling(rand_new)
rand_upscaled.write(indir + '5_random_catalog_with_upscaled_richness' +
                    str(aper) + '.list',
                    format='ascii',
                    overwrite=True)

#%%
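Here the per-aperture pixel statistics come back as a dict of arrays, are packed into a Table via np.column_stack, and are hstacked onto the random catalogue as extra columns. Since Table accepts a dict directly, the column_stack step can be dropped; a sketch with made-up numbers:

import numpy as np
from astropy.table import Table, hstack

rand = Table({'ra': [150.1, 150.2], 'richness': [5, 8]})
pixel_dict = {'tot_pix': np.array([100, 100]),
              'good_pix': np.array([97, 88]),
              'gpf': np.array([0.97, 0.88])}

pix_tab = Table(pixel_dict)          # column names taken from the dict keys
rand_new = hstack([rand, pix_tab])
print(rand_new)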
コード例 #54
0
ファイル: fits_warp.py プロジェクト: nhurleywalker/fits_warp
def warped_xmatch(
    incat=None,
    refcat=None,
    ra1="ra",
    dec1="dec",
    ra2="RAJ2000",
    dec2="DEJ2000",
    radius=2 / 60.0,
):
    """
    Create a cross match solution between two catalogues that accounts for bulk shifts and image warping.
    The warping is done in pixel coordinates, not sky coordinates.

    :param image: Fits image containing the WCS info for sky->pix conversion (Ideally the image which was used
                  to create incat.
    :param incat: The input catalogue which is to be warped during the cross matching process.
    :param ref_cat: The reference image which will remain unwarped during the cross matching process
    :param ra1, dec1: column names for ra/dec in the input catalogue
    :param ra2, dec2: column names for ra/dec in the reference catalogue
    :param radius: initial matching radius in degrees
    :return:
    """
    # incat/refcat are given as file names; load them into Tables
    incat = Table.read(incat)
    refcat = Table.read(refcat)

    # The data attribute is needed in case either table carries with it a unit metavalue. If
    # it can not be parsed then the below will fail without the data, as SkyCoord ignores the
    # specified unit
    target_cat = SkyCoord(incat[ra1].data,
                          incat[dec1].data,
                          unit=(u.degree, u.degree),
                          frame="icrs")
    ref_cat = SkyCoord(refcat[ra2].data,
                       refcat[dec2].data,
                       unit=(u.degree, u.degree),
                       frame="icrs")

    center = SkyOffsetFrame(origin=SkyCoord(
        np.mean(target_cat.ra), np.mean(target_cat.dec), frame="icrs"))

    tcat_offset = target_cat.transform_to(center)
    rcat_offset = ref_cat.transform_to(center)

    # crossmatch the two catalogs
    idx, dist, _ = tcat_offset.match_to_catalog_sky(rcat_offset)

    # accept only matches within radius
    distance_mask = np.where(
        dist.degree < radius)  # this mask is into tcat_offset
    match_mask = idx[distance_mask]  # this mask is into rcat_offset
    print(len(match_mask))

    # calculate the ra/dec shifts
    dlon = rcat_offset.lon[match_mask] - tcat_offset.lon[distance_mask]
    dlat = rcat_offset.lat[match_mask] - tcat_offset.lat[distance_mask]

    # remake the offset catalogue with the bulk shift included
    tcat_offset = SkyCoord(tcat_offset.lon + np.mean(dlon),
                           tcat_offset.lat + np.mean(dlat),
                           frame=center)

    # now do this again 3 more times but using the Rbf
    for i in range(3):
        # crossmatch the two catalogs
        idx, dist, _ = tcat_offset.match_to_catalog_sky(rcat_offset)
        # accept only matches within radius
        distance_mask = np.where(dist.degree < radius)  # this mask is into cat
        match_mask = idx[distance_mask]  # this mask is into tcat_offset
        if len(match_mask) < 1:
            break

        # calculate the ra/dec shifts
        dlon = (rcat_offset.lon.degree[match_mask] -
                tcat_offset.lon.degree[distance_mask])
        dlat = (rcat_offset.lat.degree[match_mask] -
                tcat_offset.lat.degree[distance_mask])

        # use the following to make some models of the offsets
        dlonmodel = interpolate.Rbf(
            tcat_offset.lon.degree[distance_mask],
            tcat_offset.lat.degree[distance_mask],
            dlon,
            function="linear",
            smooth=3,
        )
        dlatmodel = interpolate.Rbf(
            tcat_offset.lon.degree[distance_mask],
            tcat_offset.lat.degree[distance_mask],
            dlat,
            function="linear",
            smooth=3,
        )

        # remake/update the tcat_offset with this new model.
        tcat_offset = SkyCoord(
            tcat_offset.lon +
            dlonmodel(tcat_offset.lon.degree, tcat_offset.lat.degree) *
            u.degree,
            tcat_offset.lat +
            dlatmodel(tcat_offset.lon.degree, tcat_offset.lat.degree) *
            u.degree,
            frame=center,
        )

    # final crossmatch to make the xmatch file
    idx, dist, _ = tcat_offset.match_to_catalog_sky(rcat_offset)
    # accept only matches within radius
    distance_mask = np.where(dist.degree < radius)  # this mask is into cat
    match_mask = idx[distance_mask]  # this mask is into tcat_offset
    # print("Final mask {0}".format(len(match_mask)))
    xmatch = hstack([incat[distance_mask], refcat[match_mask]])

    # return a warped version of the target catalogue and the final cross matched table
    tcat_corrected = tcat_offset.transform_to(target_cat)
    incat[ra1] = tcat_corrected.ra.degree
    incat[dec1] = tcat_corrected.dec.degree
    return incat, xmatch
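A hypothetical invocation; the FITS file names are placeholders, not files from this project:

incat_warped, xmatch = warped_xmatch(incat='my_sources.fits',
                                     refcat='reference_cat.fits',
                                     radius=2 / 60.0)
xmatch.write('xmatch.fits', overwrite=True)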
コード例 #55
0
def load_z(fibermap_files, zbest_files=None, outfil=None):
    '''Load input and output redshift values for a set of exposures

    Parameters
    ----------
    fibermap_files: list
      List of fibermap files; none of these should be calibration exposures.
    zbest_files: list, optional
      List of zbest output files
      Slurped from fibermap info if not provided
    outfil: str, optional
      Output file for the table

    Returns
    -------
    simz_tab: astropy.Table
      Merged table of simspec data
    zb_tab: astropy.Table
      Merged table of zbest output
    '''
    # imports
    log = get_logger()

    # Init
    if zbest_files is None:
        flag_load_zbest = True
        zbest_files = []
    else:
        flag_load_zbest = False
    # Load up fibermap and simspec tables
    fbm_tabs = []
    sps_tabs = []
    for fibermap_file in fibermap_files:

        # zbest?
        if flag_load_zbest:
            fibermap_data = desispec.io.read_fibermap(fibermap_file)
            zbest_files += find_zbest_files(fibermap_data)

        log.info('Reading: {:s}'.format(fibermap_file))
        # Load simspec (for fibermap too!)
        simspec_file = fibermap_file.replace('fibermap', 'simspec')
        sps_hdu = fits.open(simspec_file)
        # Make Tables
        fbm_tabs.append(Table(sps_hdu['FIBERMAP'].data, masked=True))
        sps_tabs.append(Table(sps_hdu['TRUTH'].data, masked=True))
        sps_hdu.close()

    # Stack
    fbm_tab = vstack(fbm_tabs)
    sps_tab = vstack(sps_tabs)
    del fbm_tabs, sps_tabs

    # Add the version number header keywords from fibermap_files[0]
    hdr = fits.getheader(fibermap_files[0].replace('fibermap', 'simspec'))
    for key, value in sorted(hdr.items()):
        if key.startswith('DEPNAM') or key.startswith('DEPVER'):
            fbm_tab.meta[key] = value

    # Drop to unique
    univ, uni_idx = np.unique(np.array(fbm_tab['TARGETID']), return_index=True)
    fbm_tab = fbm_tab[uni_idx]
    sps_tab = sps_tab[uni_idx]

    # Combine + Sort
    sps_tab.remove_column('TARGETID')  # It occurs in both tables
    sps_tab.remove_column('MAG')  # It occurs in both tables
    simz_tab = hstack([fbm_tab, sps_tab], join_type='exact')
    simz_tab.sort('TARGETID')

    # Cleanup some names
    #simz_tab.rename_column('OBJTYPE_1', 'OBJTYPE')
    #simz_tab.rename_column('OBJTYPE_2', 'TRUETYPE')

    # Update QSO naming
    qsol = np.where(match_otype(simz_tab, 'QSO')
                    & (simz_tab['TRUEZ'] >= 2.1))[0]
    simz_tab['TEMPLATETYPE'][qsol] = 'QSO_L'
    qsot = np.where(match_otype(simz_tab, 'QSO')
                    & (simz_tab['TRUEZ'] < 2.1))[0]
    simz_tab['TEMPLATETYPE'][qsot] = 'QSO_T'

    # Load up zbest files
    zb_tabs = []
    for zbest_file in zbest_files:
        try:
            zb_hdu = fits.open(zbest_file)
        except FileNotFoundError:
            log.error("zbest file {} not found".format(zbest_file))
        else:
            zb_tabs.append(Table(zb_hdu[1].data))

    # Stack
    zb_tab = vstack(zb_tabs)
    univ, uni_idx = np.unique(np.array(zb_tab['TARGETID']), return_index=True)
    zb_tab = zb_tab[uni_idx]

    # Return
    return simz_tab, zb_tab
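hstack with join_type='exact' refuses to pad: it raises if the tables differ in length, which is the right guard here because fbm_tab and sps_tab were both cut to the same unique TARGETIDs. A minimal illustration:

from astropy.table import Table, hstack

t1 = Table({'TARGETID': [1, 2]})
t2 = Table({'TRUEZ': [0.5, 1.2]})
print(hstack([t1, t2], join_type='exact'))   # fine: equal row counts

t3 = Table({'TRUEZ': [0.5]})
try:
    hstack([t1, t3], join_type='exact')
except Exception as err:                     # raises: row counts differ
    print(err)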
コード例 #56
0
    def query_region(self,
                     coords,
                     radius=3.5 * u.arcsec,
                     shotid=None,
                     return_index=False,
                     return_flags=True):
        """
        Function to retrieve the indexes of the FiberIndex table
        for a specific region

        Parameters
        ----------
        self
            the FiberIndex class for a specific survey
        coords
            center coordinate you want to search. This should
            be an astropy SkyCoord object
        radius
            radius you want to search. An astropy quantity object
        shotid
            Specific shotid (dtype=int) you want
        return_index: bool
            Option to return row index values for slicing. Default
            is False
        return_flags: bool
            Option to return mask info. Default is True

        Returns
        -------
        table: astropy table
            An astropy table of fiber information in the queried aperture
        table_index: optional
            an optional array of row coordinates corresponding to the
            retrieved fiber table
        """

        Nside = 2**15

        ra_obj = coords.ra.deg
        dec_obj = coords.dec.deg

        ra_sep = radius.to(u.degree).value + 3.0 / 3600.0

        vec = hp.ang2vec(ra_obj, dec_obj, lonlat=True)

        pix_region = hp.query_disc(Nside, vec, (ra_sep * np.pi / 180))

        seltab = Table()
        table_index = []

        for hpix in pix_region:
            if shotid:
                h_tab, h_tab_index = self.get_fib_from_hp(hpix,
                                                          shotid=shotid,
                                                          return_index=True)
            else:
                h_tab, h_tab_index = self.get_fib_from_hp(hpix,
                                                          return_index=True)
            seltab = vstack([seltab, h_tab])
            table_index.extend(h_tab_index)

        fibcoords = SkyCoord(seltab["ra"] * u.degree,
                             seltab["dec"] * u.degree,
                             frame="icrs")

        idx = coords.separation(fibcoords) < radius

        selected_index = np.array(table_index)[idx]

        if return_flags and self.fibermaskh5 is not None:
            mask_table = Table(
                self.fibermaskh5.root.Flags.read_coordinates(
                    selected_index))

            fiber_table = hstack([seltab[idx], mask_table])
            # check that the stacked rows describe the same fibers
            for row in fiber_table:
                if row['fiber_id_1'] != row['fiber_id_2']:
                    print('Something is wrong. Mismatched fiber: {} and {}'.
                          format(row['fiber_id_1'], row['fiber_id_2']))
            fiber_table.rename_column('fiber_id_1', 'fiber_id')
            fiber_table.remove_column('fiber_id_2')
        else:
            if return_flags:
                print('No fiber mask file found')
            fiber_table = seltab[idx]

        if return_index:
            try:
                return fiber_table, selected_index
            except TypeError:
                return None, None
        else:
            return fiber_table
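The healpix search above deliberately over-covers: query_disc is run with the radius padded by ~3 arcsec, and the exact cut is applied afterwards with SkyCoord.separation. The healpy part in isolation (coordinates are arbitrary):

import healpy as hp
import numpy as np

nside = 2**15
ra_obj, dec_obj = 150.0, 2.2                 # degrees
radius_deg = 3.5 / 3600.0 + 3.0 / 3600.0     # search radius plus padding

vec = hp.ang2vec(ra_obj, dec_obj, lonlat=True)
pix_region = hp.query_disc(nside, vec, np.radians(radius_deg))
print(len(pix_region), 'healpix pixels to scan')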
コード例 #57
0
ファイル: gaia_V.3.3.py プロジェクト: prajwel/UVIT-POC
nalpha, ndelta, nuv_mag, nuv_fwhm = zip(*refined_set)

confined_set = [
    (nm, al, de, nf)
    for al, de, nm, nf in zip(nalpha, ndelta, nuv_mag, nuv_fwhm)
    if cel_separation(al, de) <= field_radius[instrument] * u.arcsec
]

nd = np.array(sorted(confined_set))[0:5]
cat_nuv_counts = countnuv(nd[:, 0])
cat_nuv_res = format_nuv(cat_nuv_counts)

# To convert ra_deg and dec_deg to ra_hms and dec_dms.
xy_tab = deg_to_hms(nd[:, 1], nd[:, 2])
cat_nuv_res = hstack([xy_tab, cat_nuv_res])

balance = cat_nuv_res['ra_hms', 'dec_dms', 'Mag']
balance.rename_column('Mag', 'CAT_Mag')
balance['fwhm'] = nd[:, 3]
balance['fwhm'].format = '4.4f'

# FUV
fuv_mag = hdu[1].data['fuv_mag']

fuv_absent = 'no'
if len(np.unique(fuv_mag)) == 1:  # when FUV data is absent.
    fd = nd
    warning = '\nFUV observations seem to be absent! Using M_fuv = M_nuv - 1.65.'
    print(warning)
    fuv_absent = 'yes'
コード例 #58
0
    def __init__(self,
                 survey=LATEST_HDR_NAME,
                 load_fiber_table=False,
                 loadall=False):
        """
        Initialize the FiberIndex class for a given data release
        
        Parameters
        ----------
        survey : string
            Data release you would like to load, i.e., 'hdr1','HDR2'
            This is case insensitive.
        load_fiber_table : bool
            Option to read in all fibers. This takes about a minute
            and will use a lot of memory.

        Returns
        -------
        FiberIndex class object
        """
        self.survey = survey

        if self.survey == "hdr1":
            print("Sorry there is no FiberIndex for hdr1")
            return None

        global config
        config = HDRconfig(survey=survey.lower())

        self.filename = config.fiberindexh5
        self.hdfile = tb.open_file(self.filename, mode="r")
        self.fiber_table = None
        try:
            self.fibermaskh5 = tb.open_file(config.fibermaskh5, 'r')
        except Exception:
            print('Could not find fiber mask file in {}'.format(
                config.fibermaskh5))
            self.fibermaskh5 = None

        if load_fiber_table:
            self.fiber_table = Table(self.hdfile.root.FiberIndex.read())
            self.coords = SkyCoord(
                self.fiber_table["ra"] * u.degree,
                self.fiber_table["dec"] * u.degree,
                frame="icrs",
            )

            # add masking info if found
            if self.fibermaskh5 is not None:
                self.mask_table = Table(self.fibermaskh5.root.Flags.read())
                self.fiber_table = hstack([self.fiber_table, self.mask_table])

                # check that the stacked rows describe the same fibers
                for row in self.fiber_table:
                    if row['fiber_id_1'] != row['fiber_id_2']:
                        print(
                            'Something is wrong. Mismatched fiber: {} and {}'.
                            format(row['fiber_id_1'], row['fiber_id_2']))
                self.fiber_table.rename_column('fiber_id_1', 'fiber_id')
                self.fiber_table.remove_column('fiber_id_2')
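The fiber_id_1/fiber_id_2 pair is a by-product of hstack: when the stacked tables share a column name, astropy de-duplicates it with _1/_2 suffixes (configurable through table_names and uniq_col_name). A two-table demonstration:

from astropy.table import Table, hstack

a = Table({'fiber_id': [1, 2], 'ra': [10.0, 11.0]})
b = Table({'fiber_id': [1, 2], 'flag': [0, 1]})
print(hstack([a, b]).colnames)   # ['fiber_id_1', 'ra', 'fiber_id_2', 'flag']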
コード例 #59
0
from utilities import *
from astropy.table import Table, vstack, hstack
import glob
import numpy as np
import astropy.units as u
import astropy.constants as cnst

c=cnst.c

source = 'SgrB2S'
sourcepath=sourcedict[source]

sgrb2sz=0.000234806

mom0home=sourcepath+'mom0/*masked.fits'

txtfile=sourcepath+'mastereuksqnsfreqsdegens.txt'
array=np.genfromtxt(txtfile,dtype=str)
transitions=array[:,1]
freqs=(array[:,2].astype(float)*u.Hz).to('GHz')  # np.float is removed in recent numpy; use the builtin
freqs=[str(round((x.value*(1+sgrb2sz)),5)) for x in freqs]

stack=hstack([transitions,freqs])
table=Table(stack,names=['Transition','Frequency'])
table.write('methanoltransitiontable.fits',overwrite=True)
table.write('methanoltransitiontable.csv',overwrite=True)
#mom0s=glob.glob(mom0home)

print(freqs)
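Since both inputs are plain one-dimensional arrays, the hstack step can be skipped by handing them straight to the Table constructor; the result is the same two-column table (values here are placeholders):

from astropy.table import Table

transitions = ['5(1)-4(2)E1', '4(2)-3(1)E1']
freqs = ['216.94560', '218.44005']

table = Table([transitions, freqs], names=['Transition', 'Frequency'])
print(table)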

コード例 #60
0
def ns_combine(ned_name,
               simbad_name,
               ns_combine,
               final_tab,
               match_tol=1.0):  # match_tol in arcsec

    ned_in = Table.read(ned_name)
    simbad_in = Table.read(simbad_name)

    # prelim processing
    ned_proc = reformat_cat(
        ned_in,
        old_name='Object Name',
        new_name='Name_N',
        old_type='Type',
        new_type='Type_N',
        keepcols=['Object Name', 'RA(deg)', 'DEC(deg)', 'Type'])
    sim_proc = reformat_cat(simbad_in,
                            old_name='MAIN_ID',
                            new_name='Name_S',
                            old_type='OTYPE',
                            new_type='Type_S')

    # construct the SkyCoords needed for matching, from the unit-bearing RA/Dec columns
    ned_coo = SkyCoord(ra=ned_proc['RA(deg)'], dec=ned_proc['DEC(deg)'])
    sim_coo = SkyCoord(ra=sim_proc['RA_d'], dec=sim_proc['DEC_d'])

    # do the matching; returns row indices of the matches in the NED and SIMBAD tables
    matched_ned, matched_sim, ned_only, sim_only = symmetric_match_sky_coords(
        ned_coo, sim_coo, match_tol * u.arcsec)

    print("Matched NED column:")
    print(ned_proc[matched_ned])
    print("Matched SIMBAD column:")
    print(sim_proc[matched_sim])
    print("Unmatched NED:")
    print(ned_proc[ned_only])
    print("Unmatched SIMBAD:")
    print(sim_proc[sim_only])

    # generate the matched table
    matchtab = hstack([ned_proc[matched_ned], sim_proc[matched_sim]],
                      join_type='outer')
    # mark the really good matches
    matchtab2 = process_match(matchtab)
    matchtab2.write(ns_combine, format='fits')
    # rename some columns
    nedcat = process_unmatch(ned_proc[ned_only],
                             src='N',
                             rename_cols=ned_rename)
    simcat = process_unmatch(sim_proc[sim_only],
                             src='S',
                             rename_cols=sim_rename)
    keeplist = ['Name_N', 'RA(deg)', 'DEC(deg)', 'Type_N']
    matchtab3 = process_unmatch(Table(matchtab2[keeplist]),
                                src='NS',
                                rename_cols=ned_rename)

    # add on the unmatched objects
    finaltab = vstack([matchtab3, nedcat, simcat], join_type='outer')

    # save the result
    finaltab.write(final_tab, format='fits')

    return
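symmetric_match_sky_coords comes from the surrounding project and is not shown; the usual implementation keeps only pairs that are each other's mutual nearest neighbour within the tolerance. A sketch under that assumption (not the project's actual code):

import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord

def symmetric_match_sky_coords(coo1, coo2, tol):
    """Return mutual nearest-neighbour pairs within tol, plus the
    indices in each catalogue that have no symmetric match."""
    idx12, d12, _ = coo1.match_to_catalog_sky(coo2)
    idx21, _, _ = coo2.match_to_catalog_sky(coo1)

    i1 = np.arange(len(coo1))
    mutual = (idx21[idx12] == i1) & (d12 < tol)

    matched1 = i1[mutual]          # rows of coo1 with a mutual match
    matched2 = idx12[mutual]       # corresponding rows of coo2
    only1 = i1[~mutual]
    only2 = np.setdiff1d(np.arange(len(coo2)), matched2)
    return matched1, matched2, only1, only2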