Example #1
def update(datadir, outdir):
    ref_file = os.path.join(datadir, 'ref_obs_data.dat')
    if os.path.exists(ref_file):
        print "Reading reference data from {}".format(ref_file)
        ref_data = Table.read(ref_file, format='ascii')
    else:
        ref_data = get_obs_table('2015:100', '2016:100', msf='ENAB')
        ref_data.write(ref_file, format='ascii')

    data_file = os.path.join(datadir, 'msd_data.dat')
    if os.path.exists(data_file):
        print "Reading previous data from {}".format(data_file)
        last_data = Table.read(data_file, format='ascii')
        new_data = get_obs_table(last_data[-5]['date'], DateTime(), msf='DISA')
        if new_data['time'][0] > last_data['time'][-1]:
            msd_data = vstack([last_data, new_data])
        else:
            idx_old_data = np.flatnonzero(last_data['time'] >= new_data['time'][0])[0]
            msd_data = vstack([last_data[0:idx_old_data], new_data])
        msd_data.write(data_file, format='ascii')
        # but only use the last year for making these plots
        msd_data = msd_data[msd_data['date'] >= (DateTime() - 365).date]
    else:
        msd_data = get_obs_table(DateTime() - 365, DateTime(), msf='DISA')
        msd_data.write(data_file, format='ascii')

    print(msd_data[-1]['date'])

    # Filter known bad obsids (MUPS test fires)
    for obsid in [50702, 49802]:
        msd_data = msd_data[msd_data['obsid'] != obsid]

    one_shot_plot(outdir=outdir)
    att_err_time_plots(ref_data, msd_data, outdir=outdir)
    att_err_hist(ref_data, msd_data, outdir=outdir)
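Example #1's stack is guarded by an overlap check so re-fetched rows are not duplicated. A minimal sketch of that append-with-overlap pattern, using a made-up integer 'time' column rather than the project's real telemetry tables:

import numpy as np
from astropy.table import Table, vstack

old = Table({'time': [1, 2, 3, 4]})
new = Table({'time': [3, 4, 5]})

if new['time'][0] > old['time'][-1]:
    merged = vstack([old, new])  # no overlap: plain append
else:
    # drop the tail of the old table that the new data re-covers
    idx = np.flatnonzero(old['time'] >= new['time'][0])[0]
    merged = vstack([old[0:idx], new])
# merged['time'] is [1, 2, 3, 4, 5] with no duplicated rows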
Example #2
def do_it(comm):
    rand = np.random.RandomState()
    rank = comm.Get_rank()
    nproc = comm.Get_size()    
    mockpaths = ["/project/projectdirs/desi/mocks/GaussianRandomField/v0.0.4/",
                 "/project/projectdirs/desi/mocks/GaussianRandomField/v0.0.4/",
                 "/project/projectdirs/desi/mocks/bgs/MXXL/desi_footprint/v0.0.3/BGS_new_footprint.hdf5"]
    targetnames = ["ELG", "LRG", "BGS"]
    mockformats = ["gaussianfield", "gaussianfield", "durham_mxxl_hdf5"]

    # per-rank output FITS filename
    targetsfile = 'targets_proc_{}.fits'.format(rank)

    alltargets = list()
    alltruth = list()
    alltrueflux  = list()
    for mock_path, target_name, mock_format in zip(mockpaths, targetnames, mockformats):        
        targets, truth, trueflux, targkeep = get_mock_spectra(mock_path, target_name, mock_format, rand=rand, comm=comm)        
        print(rank, target_name, 'RA', len(targets['RA']), targets['RA'][0])

        alltargets.append(targets)
        alltruth.append(truth)
        alltrueflux.append(trueflux)
    
    targets = vstack(alltargets)
    truth = vstack(alltruth)
    trueflux = np.concatenate(alltrueflux)

    targets.write(targetsfile, overwrite=True)

    return 0
Example #3
def test_vstack():
    """
    Vstack tables with mixin cols.
    """
    t1 = QTable(MIXIN_COLS)
    t2 = QTable(MIXIN_COLS)
    with pytest.raises(NotImplementedError):
        vstack([t1, t2])
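The test pins down a limitation of the astropy version it targets: vstack on tables whose columns are mixins raises NotImplementedError (newer releases can stack many mixin types). For contrast, a minimal sketch of the plain-column case that works:

from astropy.table import Table, vstack

t1 = Table({'a': [1, 2], 'b': ['x', 'y']})
t2 = Table({'a': [3], 'b': ['z']})
t12 = vstack([t1, t2])  # 3-row table; plain Columns stack without error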
Example #4
def combine_isochrones(isoc_list):
    """ combine isochrone Tables into 1 Table"""
    if isinstance(isoc_list[0], Table):
        # assume that these data are all Table
        comb_isoc = vstack(isoc_list)
    else:
        for i in range(len(isoc_list)):
            isoc_list[i] = Table(isoc_list[i])
        comb_isoc = vstack(isoc_list)
    return comb_isoc
Example #5
def match_two_cats(ref_cats_file,test_cats_file):
    # Set the debugging level
    lvl = logging.INFO
    logging.basicConfig(format='%(message)s', level=lvl, stream=sys.stdout)
    log = logging.getLogger(__name__)

    #get lists of tractor cats to compare
    fns_1= read_lines(ref_cats_file) 
    fns_2= read_lines(test_cats_file) 
    log.info('Comparing tractor catalogues: ')
    for one,two in zip(fns_1,fns_2): log.info("%s -- %s" % (one,two)) 
    #if fns_1.size == 1: fns_1,fns_2= [fns_1],[fns_2]
    #object to store concatenated matched tractor cats
    ref_matched = []
    ref_missed = []
    test_matched = []
    test_missed = []
    d_matched= 0.
    deg2= dict(ref=0.,test=0.,matched=0.)
    #for cnt,cat1,cat2 in zip(range(len(fns_1)),fns_1,fns_2):
    for cnt,cat1,cat2 in zip([0],[fns_1[0]],[fns_2[0]]):
        log.info('Reading %s -- %s' % (cat1,cat2))
        ref_tractor = Table(fits.getdata(cat1, 1))
        test_tractor = Table(fits.getdata(cat2, 1))
        m1, m2, d12 = match_radec(ref_tractor['ra'].copy(), ref_tractor['dec'].copy(),\
                                  test_tractor['ra'].copy(), test_tractor['dec'].copy(), \
                                  1.0/3600.0)
        miss1 = np.delete(np.arange(len(ref_tractor)), m1, axis=0)
        miss2 = np.delete(np.arange(len(test_tractor)), m2, axis=0)
        log.info('matched %d/%d' % (len(m2),len(test_tractor['ra'])))

        # Build combined catalogs
        if len(ref_matched) == 0:
            ref_matched = ref_tractor[m1]
            ref_missed = ref_tractor[miss1]
            test_matched = test_tractor[m2]
            test_missed = test_tractor[miss2]
            d_matched= d12
        else:
            ref_matched = vstack((ref_matched, ref_tractor[m1]))
            ref_missed = vstack((ref_missed, ref_tractor[miss1]))
            test_matched = vstack((test_matched, test_tractor[m2]))
            test_missed = vstack((test_missed, test_tractor[miss2]))
            d_matched= np.concatenate([d_matched, d12])
        deg2['ref']+= deg2_lower_limit(ref_tractor['ra'],ref_tractor['dec'])
        deg2['test']+= deg2_lower_limit(test_tractor['ra'],test_tractor['dec'])
        deg2['matched']+= deg2_lower_limit(ref_matched['ra'],ref_matched['dec'])
    
    return dict(ref_matched = ref_matched,
                ref_missed = ref_missed,
                test_matched = test_matched,
                test_missed = test_missed,
                d_matched= d_matched,
                deg2= deg2)
Example #6
def join_starcheck_telem(fids_starcheck, fids_telem):
    """
    Remake dict of tables into a single table for each structure
    """
    # Stack the dict of tables into a single table
    t_fids_starcheck = table.vstack([fids_starcheck[obsid] for obsid in sorted(fids_starcheck)])
    t_fids_telem = table.vstack([fids_telem[obsid] for obsid in sorted(fids_telem)])

    # Join on obsid and slot columns into a single table
    starcheck_telem = table.join(t_fids_starcheck, t_fids_telem, keys=['obsid', 'slot'])

    # Reject unacquired fids
    ok = starcheck_telem['aoacyan'] > -3276
    return starcheck_telem[ok]
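The stack-then-join idiom above is worth isolating: vstack merges the per-obsid tables row-wise, then table.join matches rows across the two stacks on the key columns. A minimal sketch with made-up obsid/slot data:

from astropy import table

left = table.Table({'obsid': [1, 1, 2], 'slot': [0, 1, 0], 'x': [10., 11., 12.]})
right = table.Table({'obsid': [1, 2], 'slot': [0, 0], 'y': [0.1, 0.2]})

# default join_type='inner': keep only (obsid, slot) pairs present in both
joined = table.join(left, right, keys=['obsid', 'slot'])  # 2 rows: obsid, slot, x, y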
Example #7
def update_mass_table(drpall, mass_table_old=None, limit=None, mlband='i'):
    '''
    '''
    
    # what galaxies are available to aggregate?
    res_fnames = glob(os.path.join(basedir, 'results/*-*/*-*_res.fits'))[:limit]

    # keep only the galaxies that have not already been aggregated
    if mass_table_old is None:
        already_aggregated = [False for _ in range(len(res_fnames))]
    else:
        already_aggregated = [os.path.split(fn)[1].split('_')[0] in mass_table_old['plateifu']
                              for fn in res_fnames]
    res_fnames = [fn for done, fn in zip(already_aggregated, res_fnames) if not done]

    # aggregate individual galaxies, and stack them 
    mass_tables_new = list(ProgressBar.map(
        partial(mass_agg_onegal, mlband=mlband), res_fnames, multiprocess=False, step=5))
    mass_table_new = t.vstack(mass_tables_new)

    # if there was an old mass table, stack it with the new one
    if mass_table_old is None:
        mass_table = mass_table_new
    else:
        mass_table = t.vstack([mass_table_old, mass_table_new], join_type='inner')

    cmlr = totalmass.cmlr_kwargs
    missing_flux =  (mass_table['nsa_absmag'].to(m.Mgy) - \
                     mass_table['ifu_absmag'].to(m.Mgy)).clip(
                        a_min=0.*m.Mgy, a_max=np.inf*m.Mgy)
    mag_missing_flux = missing_flux.to(u.ABmag)
    cb1, cb2 = cmlr['cb1'], cmlr['cb2']
    color_missing_flux = mag_missing_flux[:, totalmass.StellarMass.bands_ixs[cb1]] - \
                         mag_missing_flux[:, totalmass.StellarMass.bands_ixs[cb2]]
    color_missing_flux[~np.isfinite(color_missing_flux)] = np.inf
    mass_table['outer_ml_cmlr'] = np.polyval(cmlr['cmlr_poly'], color_missing_flux.value) * \
                                  u.dex(m.m_to_l_unit)
    mass_table['outer_lum'] = mag_missing_flux.to(
        u.dex(m.bandpass_sol_l_unit),
        totalmass.bandpass_flux_to_solarunits(totalmass.StellarMass.absmag_sun))

    mass_table['outer_mass_ring'] = \
        (mass_table['outer_lum'][:, totalmass.StellarMass.bands_ixs['i']] + \
         mass_table['outer_ml_ring']).to(u.Msun)
    mass_table['outer_mass_cmlr'] = \
        (mass_table['outer_lum'][:, totalmass.StellarMass.bands_ixs['i']] + \
         mass_table['outer_ml_cmlr']).to(u.Msun)

    return mass_table
Example #8
def drop_lines(dataset, drop_fractions):
    #inserting all supernovae into a single table
    for supernova in dataset.data.keys():
        new_sna=dataset.data[supernova].copy()
        new_col=Table.Column(name='supernova', data=np.repeat(supernova, len(dataset.data[supernova])))
        new_sna.add_column(new_col, index=0)
        if supernova == list(dataset.data)[0]:
            data_table=new_sna
        else:
            data_table=vstack([data_table, new_sna], metadata_conflicts='silent')
    #in each band, determine the number of lines to drop, and drop them        
    for band in unique(data_table, keys='filter')['filter']:
        band_indices=np.where(data_table['filter']==band)
        num_drops=int(np.floor(len(data_table[band_indices])*drop_fractions[band]))
        #print 'Dropping '+str(num_drops)+' lines of '+str(len(data_table))+' ...'
        if num_drops > 0:
            lines_to_drop=np.random.choice(band_indices[0], num_drops, replace=False)
        else:
            lines_to_drop=[]
        data_table.remove_rows(lines_to_drop)
    #make copy of the original dataset, place rows from data_table in there
    new_dataset=copy.deepcopy(dataset)
    types=dataset.get_types()
    new_types=new_dataset.get_types()
    for supernova in dataset.data.keys():
        new_dataset.data[supernova]=data_table[data_table['supernova']==supernova]
        new_dataset.data[supernova].remove_column('supernova')
  
    return new_dataset
Example #9
def accessTabZranges(lineconfig = 'wOIII_nHaNIISII', survey='sdss', joinsurveys=True):
    """
    access zranges table

    PARAMS
    ----------
    lineconfig = 'wOIII_nHaNIISII'
        if 'all', then the tables for all the line configs available in the survey are combined and returned
    survey = 'sdss'
    joinsurveys=True
    """

    if lineconfig != 'all':
        filenames = ['zranges_band_'+lineconfig+'.txt']
        tab = accessFile(filename=filenames[0], survey=survey, joinsurveys=joinsurveys)

    else:
        dir_survey = getlocalpath()+survey+'/'
        filenames = glob.glob(dir_survey+'zranges_band_*.txt')
        tab = at.Table()
        for f in filenames:
            tabnew = at.Table.read(f, format='ascii')
            tab = at.vstack([tab, tabnew])

        print(filenames)

    return tab
Example #10
def get_fits_catalog(args, index_table):
    """Makes catalog containing information about parametric fits to the galaxies.
    Columns are identical to COSMOS Real Galaxy catalog"""   
    print "Creating fits catalog" 
    all_seg_ids = np.loadtxt(args.seg_list_file, delimiter=" ",dtype='S2')
    for f, filt in enumerate(args.filter_names):
        final_table = fits_table()
        for seg_id in all_seg_ids:
            file_name = args.main_path + seg_id + '/' + filt + '_with_pstamp.fits'
            seg_cat = Table.read(file_name, format='fits')
            q, = np.where(index_table['SEG_ID'] == seg_id)
            indx_seg = index_table[q]
            temp = join(seg_cat, indx_seg, keys='NUMBER')
            temp.rename_column('MAG_AUTO', 'mag_auto')
            temp.rename_column('FLUX_RADIUS', 'flux_radius')            
            col = Column(temp['stamp_flux'], name='flux')
            temp.add_column(col)
            final_table = vstack([final_table,temp], join_type='inner')
        path = args.main_path + args.out_dir 
        index_table.sort('ORDER')
        ord_indx = [np.where(i_t==final_table['IDENT'])[0][0] for i_t in index_table['IDENT']]
        file_name = args.fits_file_name.replace('filter', args.file_filter_name[f])
        print "Savings fits file at ", path + file_name
        final_table[ord_indx].write(path + file_name, format='fits',
                                                overwrite=True)
Example #11
def assign_num(args):
    """Assigns individual identification number to each object"""
    seed =122
    np.random.seed(seed)
    print "Assigning number"
    names = ('SEG_ID', 'NUMBER', 'IDENT')
    dtype = ('str', 'int', 'int')
    index_table = Table(names=names, dtype = dtype)
    ident = 0
    #objects detected are same in all filters. So getting objects in first filter
    #is sufficient
    filt = args.filter_names[0]
    all_seg_ids = np.loadtxt(args.seg_list_file, delimiter=" ",dtype='S2')
    for seg_id in all_seg_ids:
        file_name = args.main_path + seg_id + '/' + filt + '_with_pstamp.fits'
        catalog = Table.read(file_name, format='fits')
        idents = range(ident,ident+len(catalog))
        seg_ids = [seg_id]*len(catalog)
        numbers = catalog['NUMBER']       
        temp = Table([seg_ids, numbers,idents],names=names, dtype = dtype)
        index_table = vstack([index_table,temp])
        ident+=len(catalog)
    shuffle_idents = list(range(len(index_table)))
    np.random.shuffle(shuffle_idents)
    index_table = index_table[shuffle_idents]
    order_idents = list(range(len(index_table)))
    file_nums = np.array(order_idents)//1000 + 1
    hdus= np.zeros(len(order_idents))
    names = ('ORDER', 'FILE_NUM', 'HDU')
    dtype = ('int' ,'int', 'int')
    temp = Table([order_idents,file_nums,hdus], names=names, dtype=dtype)
    index_table = hstack([index_table,temp])
    cat_name = args.main_path + 'index_table_' + args.cat_name.replace('filter', '')
    return index_table
Example #12
def readps1_coordinates(ra,dec,outfile=None,path='/line12/Pan-STARRS/chunks-qz-star-v2/',silent=True):
    """
    Retrieve PS1 catalog entries covering the positions (ra, dec)
    ra, dec: in degrees, scalar or array
    """
    ra=np.array(ra)
    dec=np.array(dec)
    phi=np.deg2rad(ra)
    theta=np.deg2rad(90.0-dec)
    ipring=ang2pix(32,theta,phi)
    pix = np.unique(ipring)
    npix=pix.size
    if not silent:
        print('Total '+str(npix)+' healpix pixels:')
    cat=[]
    for ipix in pix:
        catname=os.path.join(path,'ps1-'+'%5.5d'%ipix+'.fits')
        if not os.path.isfile(catname):
            continue
        if not silent:
            print('reading '+catname)
        cat.append(table.Table.read(catname,format='fits'))
    totalcat = []
    if cat != []:
        totalcat=table.vstack(cat)
        if outfile is not None:
            totalcat.write(outfile,format='fits',overwrite=True)
    return totalcat
Example #13
def _tile_objcat(ad, shifts):
    """
    This produces a single Table instance combining all the individual
    OBJCATs, with X_IMAGE and Y_IMAGE updated to account for the tiling,
    and NUMBER changed to avoid duplications.

    Parameters
    ----------
    ad: astrodata
        input AD instance (with OBJCATs)
    shifts: list of 2-tuples
        array shifts (x,y) from original extension into tiled image

    Returns
    -------
    Table: the tiled OBJCAT
    """
    tiled_objcat = None
    for ext, shift in zip(ad, shifts):
        try:
            objcat = ext.OBJCAT
        except AttributeError:
            pass
        else:
            objcat['X_IMAGE'] += shift[0]
            objcat['Y_IMAGE'] += shift[1]
            if tiled_objcat:
                objcat['NUMBER'] += max(tiled_objcat['NUMBER'])
                tiled_objcat = vstack([tiled_objcat, objcat],
                                      metadata_conflicts='silent')
            else:
                tiled_objcat = objcat

    return tiled_objcat
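metadata_conflicts='silent' matters here because each OBJCAT carries its own extension metadata. A minimal sketch of the behavior with made-up meta values: when stacked tables disagree on a key, the value from the later table wins, and 'silent' suppresses the MergeConflictWarning that vstack would otherwise emit.

from astropy.table import Table, vstack

t1 = Table({'n': [1]})
t1.meta['origin'] = 'ext1'
t2 = Table({'n': [2]})
t2.meta['origin'] = 'ext2'

stacked = vstack([t1, t2], metadata_conflicts='silent')
# stacked.meta['origin'] == 'ext2'; no warning is raised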
Example #14
def main():

    usage = "usage: %(prog)s [config files]"
    description = "Merge tables."
    parser = argparse.ArgumentParser(usage=usage,description=description)

    parser.add_argument('--output', default = None, required=True)   
    parser.add_argument('files', nargs='*', default = None,
                        help='One or more FITS files containing BINTABLEs.')

    args = parser.parse_args()
    
    h0 = pyfits.open(args.files[0])

    tables = []

    for f in sorted(args.files):

        print(f)

        tables += [Table.read(f)]


    tab = vstack(tables)
    tab.write(args.output,format='fits',overwrite=True)

    h = pyfits.open(args.output)
    h0[1] = h[1]
    h0.writeto(args.output, overwrite=True)
Example #15
def dict_to_fits(table_dict, clusters):
    # Recombine table_dict (keyed 1..clusters) into a single table,
    # starting from an empty copy of the first table's structure
    fits_table = table_dict[1][[]]
    for i in range(1, clusters + 1):
        fits_table = vstack([fits_table, table_dict[i]])
    return fits_table
Example #16
def read_aat():

	aat_path  = SAGA_DROPBOX + '/Spectra/Final/AAT/'
	aat_files = glob.glob(aat_path+'*zlog')

	n=0
	for afile in aat_files:	

		# ACCEPT ALL GOOD SPECTRA
		adata = ascii.read(afile, data_start=0, delimiter=' ')
		msk = adata['col7'] >= 1  # ONLY OBJECTS WITH QUALITY GE 1
		aat = adata[msk]

		# PLACE HOLDER ARRAYS	
		one = np.ones(len(aat))
		telname = ['AAT' for i in one]
		spec_repeat = ['AAT' for i in one]
		maskid = [afile for i in one]
		maskid = [x.split(SAGA_DROPBOX,1)[1] for x in maskid]


	   # CREATE AAT SPEC TABLE
		aat_table1 = table.table.Table([aat['col2'], aat['col3'], maskid, aat['col8'],\
									    aat['col5'], aat['col7'], telname,spec_repeat], \
			     				        names=('RA', 'DEC', 'MASKNAME','specobjid',\
			     		                       'SPEC_Z','ZQUALITY','TELNAME','SPEC_REPEAT'))


	   # CREATE OR APPEND TO AAT TABLE	
		if (n==0):  aat_table = aat_table1
		if (n > 0): aat_table = table.vstack([aat_table,aat_table1])
		n=n+1
	print "Number of AAT Files = ",n	
	return aat_table
Example #17
def accessFile(filename='zranges_band_wOIII_nHaNIISII.txt', survey='sdss', joinsurveys=True):
    """
    access files in filters/ such as:
        HaNIIredshiftrange0.2.txt
        OIIIredshiftrange0.6.txt

    PARAMS
    ---------
    filename='OIIIredshiftrange0.6.txt'
    survey='sdss': (string)
            for joined surveys use '-', e.g., hsc-ukirt
    joinsurveys=True (bool)
            if True and survey/filename does not exist, for joined surveys read the file from each survey's directory and merge the results
    """

    filepath = getlocalpath()+survey+'/'+filename
    if os.path.isfile(filepath):
        tab = at.Table.read(filepath, format='ascii')
    elif ('-' in survey) and joinsurveys: 
        surveys = survey.split('-')
        tab = at.Table()
        for s in surveys:
            filepath = getlocalpath()+s+'/'+filename
            tabnew = at.Table.read(filepath, format='ascii')
            tab = at.vstack([tab, tabnew])
    else:
        raise NameError('File does not exist: '+filepath)

    return tab
Example #18
    def make_total_index_table(self, data_store, modeltype, out_dir_background_model=None,
                               filename_obs_group_table=None, smooth=False):
        """Create a hdu-index table with a row containing the link to the background model for each observation.

        Parameters
        ----------
        data_store : `~gammapy.data.DataStore`
            `DataStore` for the runs for which we want to compute a background model
        modeltype : {'3D', '2D'}
            Type of the background modelling
        out_dir_background_model :  str
            directory where are located the backgrounds models for each group
        filename_obs_group_table : str
            name of the file containing the `~astropy.table.Table` with the group infos
        smooth : bool
            True if you want to use the smooth bkg model

        Returns
        -------
        index_table_new : `~astropy.table.Table`
            Index hdu table with a background row
        """

        index_table_bkg = self.make_bkg_index_table(data_store, modeltype, out_dir_background_model,
                                                    filename_obs_group_table, smooth)
        index_bkg = np.where(data_store.hdu_table["HDU_CLASS"] == "bkg_3d")[0].tolist()
        data_store.hdu_table.remove_rows(index_bkg)
        index_table_new = vstack([data_store.hdu_table, index_table_bkg])
        return index_table_new
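The method above is an instance of a replace-rows pattern: remove_rows deletes the rows being superseded, then vstack appends their replacements. A minimal sketch with a made-up HDU table:

import numpy as np
from astropy.table import Table, vstack

hdu_table = Table({'HDU_CLASS': ['events', 'bkg_3d', 'events'], 'n': [1, 2, 3]})
new_bkg = Table({'HDU_CLASS': ['bkg_3d'], 'n': [99]})

idx_bkg = np.where(hdu_table['HDU_CLASS'] == 'bkg_3d')[0].tolist()
hdu_table.remove_rows(idx_bkg)            # drop the old background row(s)
hdu_table = vstack([hdu_table, new_bkg])  # append the replacements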
Example #19
    def test_daogroup_three(self):
        """
           1 +--+-------+--------+--------+--------+-------+--------+--+
             |                                                         |
             |                                                         |
             |                                                         |
         0.5 +                                                         +
             |                                                         |
             |                                                         |
           0 +  *   *   *    *   *                 *   *   *    *   *  +
             |                                                         |
             |                                                         |
        -0.5 +                                                         +
             |                                                         |
             |                                                         |
             |                                                         |
          -1 +--+-------+--------+--------+--------+-------+--------+--+
                0      0.5       1       1.5       2      2.5       3
        """

        first_group = Table([np.linspace(0, 1, 5), np.zeros(5),
                             np.arange(5) + 1, np.ones(5, dtype=int)],
                            names=('x_0', 'y_0', 'id', 'group_id'))
        second_group = Table([np.linspace(2, 3, 5), np.zeros(5),
                              6 + np.arange(5), 2*np.ones(5, dtype=int)],
                             names=('x_0', 'y_0', 'id', 'group_id'))
        starlist = vstack([first_group, second_group])
        daogroup = DAOGroup(crit_separation=0.3)
        test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])
        assert_array_equal(starlist, test_starlist)
Example #20
def s2n(t,FUV,tnorm=1000):
    # load cos etc simulations for flat spectrum 1000s exp, and FUV = 17 mag
    cos_g130m = ascii.read('cos_etc_g130m_v24.1.csv')
    cos_g160m = ascii.read('cos_etc_g160m_v24.1.csv')
    #separate them at 1400 A
    cond = cos_g130m['wavelength'] < 1405
    cos_g130m = cos_g130m[cond]
    cond = cos_g160m['wavelength'] >= 1405
    cos_g160m = cos_g160m[cond]
    #merge both
    cos = vstack([cos_g130m,cos_g160m], join_type='exact')
    
    # Signal
    signal = cos['target_counts']*t/tnorm * 10.**((FUV-17.)/(-2.5))
    
    # Noise terms
    dark = cos['dark_counts']*t/tnorm
    sky = cos['sky_counts']*t/tnorm
        
    # Noise
    var = signal + dark + sky
    sig = np.sqrt(var)
    
    #append S/N to cos
    sn = signal/sig * np.sqrt(6) # per-resolution element of 6 pixels
    cos.add_column(Column(name='sn', data=sn))
    return cos
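join_type='exact' above insists that the two ETC tables share identical column names, failing loudly instead of padding. A minimal sketch of the three join_type modes vstack accepts, on toy tables:

from astropy.table import Table, vstack

t1 = Table({'a': [1], 'b': [2]})
t2 = Table({'a': [3], 'c': [4]})

vstack([t1, t2], join_type='inner')  # only the common column 'a' survives
vstack([t1, t2], join_type='outer')  # default: keep 'b' and 'c', gaps masked
# vstack([t1, t2], join_type='exact') raises TableMergeError for these tables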
Example #21
    def stack(cls, flux_points):
        """Create flux points by stacking list of flux points.

        The first `FluxPoints` object in the list is taken as a reference to infer
        column names and units for the stacked object.

        Parameters
        ----------
        flux_points : list of `FluxPoints`
            List of flux points to stack.

        Returns
        -------
        flux_points : `FluxPoints`
            Flux points without upper limit points.
        """
        reference = flux_points[0].table

        tables = []
        for fp in flux_points:
            table = fp.table
            for colname in reference.colnames:
                column = reference[colname]
                if column.unit:
                    table[colname] = table[colname].quantity.to(column.unit)
            tables.append(table[reference.colnames])

        table_stacked = vstack(tables)
        table_stacked.meta["SED_TYPE"] = reference.meta["SED_TYPE"]

        return cls(table_stacked)
Example #22
File: bokrmphot.py  Project: imcgreer/bokrm
def load_raw_bok_aperphot(dataMap,targetName,season=None,old=False):
	if old:
		# renaming
		pfx = {'sdssstarsold':'sdssbright'}.get(targetName,targetName)
		aperCatDir = os.environ['HOME'] + \
		                '/data/projects/SDSS-RM/rmreduce/catalogs_v2b/'
	else:
		aperCatDir = os.path.join(dataMap.procDir,'catalogs')
		pfx = targetName+'_aper'
	allTabs = []
	for utd in dataMap.iterUtDates():
		if ( season is not None and
		      not ( utd.startswith(season) or 
		              (utd.startswith('2013') and season=='2014') ) ):
			continue
		print('loading catalogs from ', utd)
		for filt in dataMap.iterFilters():
			if old and utd=='20131223':
				utd = '20131222'
			aperCatFn = '.'.join([pfx,utd,filt,'cat','fits'])
			aperCatF = os.path.join(aperCatDir,aperCatFn)
			if os.path.exists(aperCatF):
				if old:
					tab = _read_old_catf(dataMap.obsDb,aperCatF)
				else:
					tab = Table.read(aperCatF)
#				tab['filter'] = filt # handy to save this here
				allTabs.append(tab)
	#
	phot = vstack(allTabs)
	print('stacked aperture phot catalogs into table with ', len(phot), ' rows')
	#self.phot.sort(['objId','frameIndex'])
	phot.sort('objId')
	return phot
Example #23
File: ggg.py  Project: pyigm/igmspec
def grab_meta():
    """ Grab GGG meta Table
    Returns
    -------

    """
    # This table has units in it!
    meta = Table.read(os.getenv('RAW_IGMSPEC')+'/GGG/GGG_catalog.fits.gz')
    nqso = len(meta)
    # Turn off RA/DEC units
    for key in ['RA', 'DEC']:
        meta[key].unit = None
    meta.rename_column('RA', 'RA_GROUP')
    meta.rename_column('DEC', 'DEC_GROUP')
    #
    # Add zem
    meta['zem_GROUP'] = meta['z_gmos']
    meta['sig_zem'] = meta['zerror_gmos']
    meta['flag_zem'] = [str('GGG')]*nqso
    meta.add_column(Column([2000.]*nqso, name='EPOCH'))
    #
    meta['STYPE'] = [str('QSO')]*nqso
    # Double up for the two gratings
    ggg_meta = vstack([meta,meta])
    # Check
    assert chk_meta(ggg_meta, chk_cat_only=True)
    # Return
    return ggg_meta
Example #24
File: nicer_obs.py  Project: demorest/PINT
    def __init__(self, name, FPorbname, tt2tdb_mode = 'pint'):


        if FPorbname.startswith('@'):
            # Read multiple orbit files names
            FPlist = []
            fnames = [ll.strip() for ll in open(FPorbname[1:]).readlines()]
            for fn in fnames:
                FPlist.append(load_FPorbit(fn))
            self.FPorb = vstack(FPlist)
            # Make sure full table is sorted
            self.FPorb.sort('MJD_TT')
        else:
            self.FPorb = load_FPorbit(FPorbname)
        # Now build the interpolator here:
        self.X = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['X'])
        self.Y = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['Y'])
        self.Z = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['Z'])
        self.Vx = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['Vx'])
        self.Vy = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['Vy'])
        self.Vz = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['Vz'])
        super(NICERObs, self).__init__(name=name, tt2tdb_mode=tt2tdb_mode)
        # Print this warning once, mainly for @paulray
        if self.tt2tdb_mode.lower().startswith('pint'):
            log.debug('Using location=None for TT to TDB conversion (pint mode)')
        elif self.tt2tdb_mode.lower().startswith('geo'):
            log.warning('Using location geocenter for TT to TDB conversion')
Example #25
File: utils.py  Project: jnburchett/joebvp
def concatenate_line_tables(filelist,outtablefile='compiledVPoutputs.dat'):
    '''
    Compiles the output from several fitting runs into a single table

    Parameters
    ----------
    filelist : list of strings or str
        This should be a list containing the names of VP input files or a string referring to a file simply
        listing the input files.
        See joebvpfit.readpars for details of file format

    outtablefile : str
        Name of compiled model parameter output file

    '''

    if isinstance(filelist, str):
        lstarr=np.genfromtxt(filelist,dtype=None)
        listofiles=lstarr.tolist()
    else:
        listofiles=filelist

    tabs = []
    for i, ff in enumerate(listofiles):
        tabs.append(ascii.read(ff))
    bigpartable = vstack(tabs)
    ascii.write(bigpartable, output=outtablefile, delimiter='|')  # write out compiled table
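Reading every file into a list and stacking once, as above, avoids the quadratic copying of calling vstack pairwise inside the loop. The same idea in one line, with hypothetical file names:

from astropy.io import ascii
from astropy.table import vstack

files = ['run1.dat', 'run2.dat', 'run3.dat']  # hypothetical VP output files
bigpartable = vstack([ascii.read(f) for f in files])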
Example #26
def DoPhotometryofNight(PC,night):
    """ Does photometry of all images in the night as well as create the final magnitude catlog table """
    with open(os.path.join(PC.OUTDIR,night,PC.OUTFITSFILELIST),'r') as outfitsfilelist:
        # Skip blank lines and Commented out lines with #
        imgfilterlist = [tuple(imageLINE.rstrip().split()) for imageLINE in outfitsfilelist
                         if ((imageLINE.strip() != '') and (imageLINE[0] != '#'))]

    for OutFinalImage,Filtr in imgfilterlist:
        MagTable = RunPhotometryofImage(OutFinalImage,Filtr)

        ## Append differential 2MASS magnitude of all sources to table
        TableHeaders = ['ra','dec','mag','magerror','Qflag']#filter,epoch,ImgFile
        filter_col = Column(name='Filter', data=[Filtr]*len(MagTable))
        epoch_col = Column(name='Epoch', data=[epoch]*len(MagTable))
        imgfile_col = Column(name='ImgFile', data=[OutFinalImage]*len(MagTable))
        TableToOutput = MagTable[TableHeaders]
        TableToOutput.add_columns([filter_col, epoch_col, imgfile_col])

        # Now append the Full output table also to an ascii file.
        OutputTableFilename = os.path.join(PC.OUTDIR,PC.OUTPUTFILE)
        try :
            PreviousFullTable = ascii.read(OutputTableFilename,delimiter=',',
                                           format='commented_header',fill_values=('--','0'))
        except IOError :
            logger.info('No previous photometry output found, hence we will be creating'
                        ' a new file {0}.'.format(OutputTableFilename))
            OutputTableToWrite = TableToOutput
        else :
            OutputTableToWrite = table.vstack([PreviousFullTable,TableToOutput], join_type='outer')
        # Writing the final appended table
        ascii.write(OutputTableToWrite, OutputTableFilename,delimiter=',',format='commented_header')

        logger.info("Photometry of {0} over.".format(OutFinalImage))
Example #27
    def __init__(self, start_sci=48, end_sci=301, raw_dir='bigdog',
                 start_sky=35, end_sky=44, edit_dir='edited',
                 start_flat=23, end_flat=34,
                 start_arc=11, end_arc=22,
                 start_dark=1, end_dark=10):
        """
        This class takes raw IRTF images and pre-processes them for the
        IRAF/IDL pipeline

        Parameters
        --------------
        start_sci: int
            The starting image number for science data
        end_sci: int
            The ending image number for science data
        raw_dir: str
            The raw directory for SpeX Spectrograph (Bigdog) images
        """
        self.start_sci = start_sci
        self.end_sci = end_sci

        self.raw_dir = raw_dir
        self.edit_dir = edit_dir

        self.sciTable = self.clean_names('run', start_ind=start_sci, end_ind=end_sci,
                                         usedName='spc')
        self.skyTable = self.clean_names('runsky', start_ind=start_sky, end_ind=end_sky,
                                         usedName='spc')
        self.flatTable = self.clean_names('flat', start_ind=start_flat, end_ind=end_flat)
        self.arcTable = self.clean_names('arc', start_ind=start_arc, end_ind=end_arc)
        self.darkTable = self.clean_names('dark', start_ind=start_dark, end_ind=end_dark,
                                          usedName='spc')
        self.allTables = vstack([self.sciTable, self.skyTable, self.flatTable,
                                 self.arcTable, self.darkTable])
Example #28
File: etcs.py  Project: ntejos/pyntejos
def s2n_COS(t, FUV, tnorm=1000, v="27.1"):
    # load cos etc simulations for flat spectrum 1000s exp, and FUV = 17 mag
    if v == '26.1':
        cos_g130m = ascii.read(data_path('cos_etc_g130m_v26.1.csv'))
        cos_g160m = ascii.read(data_path('cos_etc_g160m_v26.1.csv'))
    elif v=='27.1':
        cos_g130m = ascii.read(data_path('cos_etc_g130m_v27.1.csv'))
        cos_g160m = ascii.read(data_path('cos_etc_g160m_v27.1.csv'))
    else:
        raise ValueError("The current version is not implemented or its out of date.")

    #separate them at ~1450 A
    cond = cos_g130m['wavelength'] < 1440
    cos_g130m = cos_g130m[cond]
    cond = cos_g160m['wavelength'] >= 1440
    cos_g160m = cos_g160m[cond]
    # merge both
    cos = vstack([cos_g130m, cos_g160m], join_type='exact')

    # Signal
    signal = cos['target_counts'] * t / tnorm * 10.**((FUV- 17.)/(-2.5))

    # Noise terms
    dark = cos['dark_counts'] * t / tnorm
    sky = cos['sky_counts'] * t / tnorm

    # Noise
    var = signal + dark + sky
    sig = np.sqrt(var)

    #append S/N to cos
    sn = signal/sig * np.sqrt(6) # per-resolution element of 6 pixels
    cos['sn'] = sn
    return cos
Example #29
def read_imacs():

	imacs_path  = SAGA_DROPBOX + '/Spectra/Final/IMACS/'
	imacs_files = glob.glob(imacs_path+'*zlog')

	n=0
	for ifile in imacs_files:	

		# ACCEPT ALL GOOD SPECTRA
		idata = ascii.read(ifile,guess=False,format='no_header', delimiter=' ',names=['specid','ra','dec','col4','z','col6','zq','col8','col9','col10','slitid','col12'])
		msk   = idata['zq'] >= 1

		imacs = idata[msk]

		# PLACE HOLDER ARRAYS	
		one = np.ones(len(imacs))
		telname = ['IMACS' for i in one]
		spec_repeat = ['IMACS' for i in one]
		maskid = [ifile for i in one]
		maskid = [x.split(SAGA_DROPBOX,1)[1] for x in maskid]


	   # CREATE IMACS SPEC TABLE
		imacs_table1 = table.table.Table([imacs['ra'], imacs['dec'], imacs['slitid'],imacs['col8'],\
									    imacs['z'], imacs['zq'], telname,spec_repeat], \
			     				        names=('RA', 'DEC', 'MASKNAME','specobjid',\
			     		                       'SPEC_Z','ZQUALITY','TELNAME','SPEC_REPEAT'))


	   # CREATE OR APPEND TO imacs TABLE	
		if (n==0):  imacs_table = imacs_table1
		if (n > 0): imacs_table = table.vstack([imacs_table,imacs_table1])
		n=n+1
	print "Number of IMACS Files = ",n	
	return imacs_table
Example #30
def read_snana_ascii_multi(fnames, default_tablename=None):
    """Like ``read_snana_ascii()``, but read from multiple files containing
    the same tables and glue results together into big tables.

    Parameters
    ----------
    fnames : list of str
        List of filenames.
    default_tablename : str, optional
        Passed through to ``read_snana_ascii()``.

    Returns
    -------
    tables : dictionary of `~astropy.table.Table`
        Tables indexed by table names.

    Examples
    --------
    >>> tables = read_snana_ascii_multi(['data1.txt', 'data1.txt'])

    """

    alltables = {}
    for fname in fnames:
        meta, tables = read_snana_ascii(fname,
                                        default_tablename=default_tablename)

        for key, table in six.iteritems(tables):
            if key in alltables:
                alltables[key].append(table)
            else:
                alltables[key] = [table]

    for key in alltables.keys():
        alltables[key] = vstack(alltables[key])

    return alltables
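The accumulate-then-stack pattern above generalizes to any keyed collection: append tables to per-key lists during the scan, then call vstack once per key. A minimal sketch with made-up tables:

from astropy.table import Table, vstack

alltables = {}
for name, t in [('SN', Table({'x': [1]})), ('SN', Table({'x': [2]}))]:
    alltables.setdefault(name, []).append(t)
for key in alltables:
    alltables[key] = vstack(alltables[key])
# alltables['SN'] is a single 2-row table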
Example #31
    def to_table(self,
                 sed_type="likelihood",
                 format="gadf-sed",
                 formatted=False):
        """Create table for a given SED type.

        Parameters
        ----------
        sed_type : {"likelihood", "dnde", "e2dnde", "flux", "eflux"}
            Sed type to convert to. Default is `likelihood`
        format : {"gadf-sed", "lightcurve", "binned-time-series", "profile"}
            Format specification. The following formats are supported:

                * "gadf-sed": format for sed flux points see :ref:`gadf:flux-points`
                  for details
                * "lightcurve": Gammapy internal format to store energy dependent
                  lightcurves. Basically a generalisation of the "gadf" format, but
                  currently there is no detailed documentation available.
                * "binned-time-series": table format support by Astropy's
                  `~astropy.timeseries.BinnedTimeSeries`.
                * "profile": Gammapy internal format to store energy dependent
                  flux profiles. Basically a generalisation of the "gadf" format, but
                  currently there is no detailed documentation available.

        formatted : bool
            Formatted version with column formats applied. Numerical columns are
            formatted to .3f and .3e respectively.

        Examples
        --------

        This is how to read and plot example flux points:

            >>> from gammapy.estimators import FluxPoints
            >>> fp = FluxPoints.read("$GAMMAPY_DATA/hawc_crab/HAWC19_flux_points.fits")
            >>> table = fp.to_table(sed_type="flux", format="gadf-sed", formatted=True)
            >>> print(table[:2])
            e_ref e_min e_max     flux      flux_err    flux_ul      ts    sqrt_ts is_ul
             TeV   TeV   TeV  1 / (cm2 s) 1 / (cm2 s) 1 / (cm2 s)
            ----- ----- ----- ----------- ----------- ----------- -------- ------- -----
            1.334 1.000 1.780   1.423e-11   3.135e-13         nan 2734.000  52.288 False
            2.372 1.780 3.160   5.780e-12   1.082e-13         nan 4112.000  64.125 False

        Returns
        -------
        table : `~astropy.table.Table`
            Flux points table
        """
        if format == "gadf-sed":
            # TODO: what to do with GTI info?
            if not self.geom.axes.names == ["energy"]:
                raise ValueError("Only flux points with a single energy axis "
                                 "can be converted to 'gadf-sed'")

            idx = (Ellipsis, 0, 0)
            table = self.energy_axis.to_table(format="gadf-sed")
            table.meta["SED_TYPE"] = sed_type

            if self.n_sigma_ul:
                table.meta["UL_CONF"] = np.round(
                    1 - 2 * stats.norm.sf(self.n_sigma_ul), 7)

            if sed_type == "likelihood":
                table["ref_dnde"] = self.dnde_ref[idx]
                table["ref_flux"] = self.flux_ref[idx]
                table["ref_eflux"] = self.eflux_ref[idx]

            for quantity in self.all_quantities(sed_type=sed_type):
                data = getattr(self, quantity, None)
                if data:
                    table[quantity] = data.quantity[idx]

            if self.has_stat_profiles:
                norm_axis = self.stat_scan.geom.axes["norm"]
                table["norm_scan"] = norm_axis.center.reshape((1, -1))
                table["stat"] = self.stat.data[idx]
                table["stat_scan"] = self.stat_scan.data[idx]

            table["is_ul"] = self.is_ul.data[idx]

        elif format == "lightcurve":
            time_axis = self.geom.axes["time"]

            tables = []
            for idx, (time_min,
                      time_max) in enumerate(time_axis.iter_by_edges):
                table_flat = Table()
                table_flat["time_min"] = [time_min.mjd]
                table_flat["time_max"] = [time_max.mjd]

                fp = self.slice_by_idx(slices={"time": idx})
                table = fp.to_table(sed_type=sed_type, format="gadf-sed")

                for column in table.columns:
                    table_flat[column] = table[column][np.newaxis]

                tables.append(table_flat)

            table = vstack(tables)
        elif format == "binned-time-series":
            message = (
                "Format 'binned-time-series' support a single time axis "
                f"only. Got {self.geom.axes.names}")

            if not self.geom.axes.is_unidimensional:
                raise ValueError(message)

            axis = self.geom.axes.primary_axis

            if not isinstance(axis, TimeMapAxis):
                raise ValueError(message)

            table = Table()
            table["time_bin_start"] = axis.time_min
            table["time_bin_size"] = axis.time_delta

            for quantity in self.all_quantities(sed_type=sed_type):
                data = getattr(self, quantity, None)
                if data:
                    table[quantity] = data.quantity.squeeze()
        elif format == "profile":
            x_axis = self.geom.axes["projected-distance"]

            tables = []
            for idx, (x_min, x_max) in enumerate(x_axis.iter_by_edges):
                table_flat = Table()
                table_flat["x_min"] = [x_min]
                table_flat["x_max"] = [x_max]
                table_flat["x_ref"] = [(x_max + x_min) / 2]

                fp = self.slice_by_idx(slices={"projected-distance": idx})
                table = fp.to_table(sed_type=sed_type, format="gadf-sed")

                for column in table.columns:
                    table_flat[column] = table[column][np.newaxis]

                tables.append(table_flat)

            table = vstack(tables)

        else:
            raise ValueError(f"Not a supported format {format}")

        if formatted:
            table = self._format_table(table=table)

        return table
Example #32
def main(argv=None):
    """ Main Function """

    parser = get_parser()
    args = parser.parse_args(argv)
    args.log = setup_logging()

    if args.pickle:
        args.fits = False

    if args.merge:

        if args.fits:
            master_table = Table()
            files = glob.glob(op.join(args.mergepath, "*.fits"))
            args.log.info("Merging all fits files in " + args.mergepath)

            for file in files:
                file_table = Table.read(open(file, "rb"))
                if np.size(file_table) > 0:
                    master_table = vstack([master_table, file_table])
            outfile = args.outfile + ".fits"
            master_table.write(outfile, format="fits", overwrite=True)

        else:
            all_source_dict = {}
            files = glob.glob(op.join(args.mergepath, "*.pkl"))
            args.log.info("Merging all pickle files in " + args.mergepath)
            for file in files:
                file_dict = pickle.load(open(file, "rb"))
                if len(file_dict) > 0:
                    all_source_dict = merge(all_source_dict, file_dict)

            outfile = args.outfile + ".pkl"
            pickle.dump(all_source_dict, open(outfile, "wb"))

        args.log.info("Saved output file to " + outfile)
        sys.exit("Exiting")

    if args.infile:

        args.log.info("Loading External File")

        try:
            try:
                table_in = Table.read(args.infile, format="ascii")
                if table_in.colnames == ["col1", "col2", "col3"]:
                    table_in["col1"].name = "ID"
                    table_in["col2"].name = "ra"
                    table_in["col3"].name = "dec"
                elif np.size(table_in.colnames) != 3:
                    args.log.info("Input file not in three column format")
            except Exception:
                pass
            try:
                table_in = Table.read(args.infile, format="fits")
            except Exception:
                pass
        except Exception:
            if op.exists(args.infile):
                args.log.warning("Could not open input file")
                sys.exit("Exiting")
            else:
                args.log.warning("Input file not found")
                sys.exit("Exiting")
        try:
            args.ID = table_in["ID"]
        except KeyError:
            args.ID = table_in["id"]

        try:
            args.ra = table_in["ra"]
            args.dec = table_in["dec"]
        except KeyError:
            args.ra = table_in["RA"]
            args.dec = table_in["DEC"]

    else:
        if args.ID is None:
            if np.size(args.ra) > 1:
                args.ID = [str(i).zfill(9) for i in np.arange(1, np.size(args.ra) + 1)]
            else:
                args.ID = 1

        args.log.info("Extracting for ID: %s" % args.ID)

    # generate astropy coordinates object for searching

    if re.search(":", str(args.ra)):
        args.coords = SkyCoord(args.ra, args.dec, unit=(u.hourangle, u.deg))
    else:
        args.coords = SkyCoord(args.ra, args.dec, unit=u.deg)

    S = Survey(args.survey)

    if args.keep_bad_shots:
        ind_good_shots = np.ones_like(S.shotid, dtype=bool)
    else:
        ind_good_shots = S.remove_shots()

    if args.tpmin:
        ind_tp = S.response_4540 > args.tpmin
        args.survey_class = S[ind_good_shots * ind_tp]
    else:
        args.survey_class = S[ind_good_shots]

    # if args.shotid exists, only select those shots

    if args.shotid:
        try:
            sel_shot = args.survey_class.shotid == int(args.shotid)
        except Exception:
            sel_shot = args.survey_class.datevobs == str(args.shotid)

        args.survey_class = args.survey_class[sel_shot]

    else:
        pass

    # main function to retrieve spectra dictionary
    Source_dict = get_spectra_dictionary(args)

    args.survey_class.close()

    if args.pickle:
        outfile = args.outfile + ".pkl"
        pickle.dump(Source_dict, open(outfile, "wb"))

    if args.single:
        # loop over every ID/observation combo:
        fluxden_u = 1e-17 * u.erg * u.s**(-1) * u.cm**(-2) * u.AA**(-1)
        for ID in Source_dict.keys():
            for shotid in Source_dict[ID].keys():

                wave_rect = 2.0 * np.arange(1036) + 3470.0
                spec = Source_dict[ID][shotid][0]
                spec_err = Source_dict[ID][shotid][1]
                weights = Source_dict[ID][shotid][2]

                sel = np.isfinite(spec)
                if np.sum(sel) > 0:
                    output = Table()

                    output.add_column(
                        Column(wave_rect, name="wavelength", unit=u.AA))
                    output.add_column(Column(spec, name="spec",
                                             unit=fluxden_u))
                    output.add_column(
                        Column(spec_err, name="spec_err", unit=fluxden_u))
                    output.add_column(Column(weights, name="weights"))

                    output.write("spec_" + str(ID) + "_" + str(shotid) +
                                 ".tab",
                                 format="ascii")

    if args.fits:
        output = return_astropy_table(Source_dict,
                                      fiberweights=args.fiberweights)
        if args.fiberweights:
            # cannot save fiberweights to a fits file
            output.remove_column('fiber_weights')
        output.write(args.outfile + ".fits", format="fits", overwrite=True)
Example #33
def main():
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pyirf").setLevel(logging.DEBUG)

    for k, p in particles.items():
        log.info(f"Simulated {k.title()} Events:")
        p["events"], p["simulation_info"] = read_eventdisplay_fits(p["file"])

        p["simulated_spectrum"] = PowerLaw.from_simulation(
            p["simulation_info"], T_OBS)
        p["events"]["weight"] = calculate_event_weights(
            p["events"]["true_energy"], p["target_spectrum"],
            p["simulated_spectrum"])
        p["events"]["source_fov_offset"] = calculate_source_fov_offset(
            p["events"])
        # calculate theta / distance between reco and assumed source position
        # we handle only ON observations here, so the assumed source pos
        # is the pointing position
        p["events"]["theta"] = calculate_theta(
            p["events"],
            assumed_source_az=p["events"]["pointing_az"],
            assumed_source_alt=p["events"]["pointing_alt"],
        )
        log.info(p["simulation_info"])
        log.info("")

    gammas = particles["gamma"]["events"]
    # background table composed of both electrons and protons
    background = table.vstack(
        [particles["proton"]["events"], particles["electron"]["events"]])

    log.info(
        f"Using fixed G/H cut of {INITIAL_GH_CUT} to calculate theta cuts")

    # event display uses much finer bins for the theta cut than
    # for the sensitivity
    theta_bins = add_overflow_bins(
        create_bins_per_decade(
            10**(-1.9) * u.TeV,
            10**2.3005 * u.TeV,
            50,
        ))

    # theta cut is 68 percent containment of the gammas
    # for now with a fixed global, unoptimized score cut
    mask_theta_cuts = gammas["gh_score"] >= INITIAL_GH_CUT
    theta_cuts = calculate_percentile_cut(
        gammas["theta"][mask_theta_cuts],
        gammas["reco_energy"][mask_theta_cuts],
        bins=theta_bins,
        min_value=0.05 * u.deg,
        fill_value=np.nan * u.deg,
        percentile=68,
    )

    # evaluate the theta cut
    gammas["selected_theta"] = evaluate_binned_cut(gammas["theta"],
                                                   gammas["reco_energy"],
                                                   theta_cuts, operator.le)
    # we make the background region larger by a factor of ALPHA,
    # scaling the radius by sqrt(ALPHA), to get better statistics for the background
    theta_cuts_bg = get_bg_cuts(theta_cuts, ALPHA)
    background["selected_theta"] = evaluate_binned_cut(
        background["theta"], background["reco_energy"], theta_cuts_bg,
        operator.le)

    # same bins as event display uses
    sensitivity_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV,
                               10**2.31 * u.TeV,
                               bins_per_decade=5))

    log.info("Optimizing G/H separation cut for best sensitivity")
    sensitivity_step_2, gh_cuts = optimize_gh_cut(
        gammas[gammas["selected_theta"]],
        background[background["selected_theta"]],
        bins=sensitivity_bins,
        cut_values=np.arange(-1.0, 1.005, 0.05),
        op=operator.ge,
        alpha=ALPHA,
    )

    # now that we have the optimized gh cuts, we recalculate the theta
    # cut as 68 percent containment on the events surviving these cuts.
    for tab in (gammas, background):
        tab["selected_gh"] = evaluate_binned_cut(tab["gh_score"],
                                                 tab["reco_energy"], gh_cuts,
                                                 operator.ge)

    theta_cuts_opt = calculate_percentile_cut(
        gammas["theta"],
        gammas["reco_energy"],
        theta_bins,
        fill_value=np.nan * u.deg,
        percentile=68,
        min_value=0.05 * u.deg,
    )

    theta_cuts_opt_bg = get_bg_cuts(theta_cuts_opt, ALPHA)

    for tab, cuts in zip([gammas, background],
                         [theta_cuts_opt, theta_cuts_opt_bg]):
        tab["selected_theta"] = evaluate_binned_cut(tab["theta"],
                                                    tab["reco_energy"], cuts,
                                                    operator.le)
        tab["selected"] = tab["selected_theta"] & tab["selected_gh"]

    signal_hist = create_histogram_table(gammas[gammas["selected"]],
                                         bins=sensitivity_bins)
    background_hist = create_histogram_table(
        background[background["selected"]], bins=sensitivity_bins)

    sensitivity = calculate_sensitivity(signal_hist,
                                        background_hist,
                                        alpha=ALPHA)

    # scale relative sensitivity by Crab flux to get the flux sensitivity
    for s in (sensitivity_step_2, sensitivity):
        s["flux_sensitivity"] = s["relative_sensitivity"] * CRAB_HEGRA(
            s["reco_energy_center"])

    # write OGADF output file
    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(sensitivity, name="SENSITIVITY"),
        fits.BinTableHDU(sensitivity_step_2, name="SENSITIVITY_STEP_2"),
        fits.BinTableHDU(theta_cuts, name="THETA_CUTS"),
        fits.BinTableHDU(theta_cuts_opt, name="THETA_CUTS_OPT"),
        fits.BinTableHDU(gh_cuts, name="GH_CUTS"),
    ]

    masks = {
        "": gammas["selected"],
        "_NO_CUTS": slice(None),
        "_ONLY_GH": gammas["selected_gh"],
        "_ONLY_THETA": gammas["selected_theta"],
    }

    # binnings for the irfs
    true_energy_bins = add_overflow_bins(
        create_bins_per_decade(
            10**-1.9 * u.TeV,
            10**2.31 * u.TeV,
            10,
        ))
    reco_energy_bins = add_overflow_bins(
        create_bins_per_decade(
            10**-1.9 * u.TeV,
            10**2.31 * u.TeV,
            10,
        ))
    fov_offset_bins = [0, 0.5] * u.deg
    source_offset_bins = np.arange(0, 1 + 1e-4, 1e-3) * u.deg
    energy_migration_bins = np.geomspace(0.2, 5, 200)

    for label, mask in masks.items():
        effective_area = point_like_effective_area(
            gammas[mask],
            particles["gamma"]["simulation_info"],
            true_energy_bins=true_energy_bins,
        )
        hdus.append(
            create_aeff2d_hdu(
                effective_area[...,
                               np.newaxis],  # add one dimension for FOV offset
                true_energy_bins,
                fov_offset_bins,
                extname="EFFECTIVE_AREA" + label,
            ))
        edisp = energy_dispersion(
            gammas[mask],
            true_energy_bins=true_energy_bins,
            fov_offset_bins=fov_offset_bins,
            migration_bins=energy_migration_bins,
        )
        hdus.append(
            create_energy_dispersion_hdu(
                edisp,
                true_energy_bins=true_energy_bins,
                migration_bins=energy_migration_bins,
                fov_offset_bins=fov_offset_bins,
                extname="ENERGY_DISPERSION" + label,
            ))

    bias_resolution = energy_bias_resolution(
        gammas[gammas["selected"]],
        true_energy_bins,
    )
    ang_res = angular_resolution(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
    )
    psf = psf_table(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
        fov_offset_bins=fov_offset_bins,
        source_offset_bins=source_offset_bins,
    )

    background_rate = background_2d(
        background[background['selected_gh']],
        reco_energy_bins,
        fov_offset_bins=np.arange(0, 11) * u.deg,
        t_obs=T_OBS,
    )

    hdus.append(
        create_background_2d_hdu(
            background_rate,
            reco_energy_bins,
            fov_offset_bins=np.arange(0, 11) * u.deg,
        ))
    hdus.append(
        create_psf_table_hdu(
            psf,
            true_energy_bins,
            source_offset_bins,
            fov_offset_bins,
        ))
    hdus.append(
        create_rad_max_hdu(theta_bins,
                           fov_offset_bins,
                           rad_max=theta_cuts_opt["cut"][:, np.newaxis]))
    hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION"))
    hdus.append(
        fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION"))
    fits.HDUList(hdus).writeto("pyirf_eventdisplay.fits.gz", overwrite=True)
Example #34
                                  uvb=uvb)
    # to efficiently save numpy array
    save_file_name = outpath + '/' + name
    np.save(save_file_name, flat_samples)

    out = [[q]]
    for i in range(ndim):
        mcmc = np.percentile(flat_samples[:, i], [16, 50, 84])
        q = np.diff(mcmc)
        out.append([mcmc[1]])
        out.append([q[0]])
        out.append([q[1]])

    print(out)
    t = tab.Table(out, names=('Q', 'nH', 'n16', 'n84', 'Z', 'Z16', 'Z84'))
    out_tab = tab.vstack((out_tab, t))

uvb_column = [
    'Q14', 'Q15', 'Q16', 'Q17', 'Q18', 'Q19', 'Q20', 'P19', 'FG20', 'HM12'
]
out_tab.add_column(uvb_column, name='uvb')

out_tab.write(outfile, overwrite=True)
"""
ions_to_use= ['C+3', 'N+3', 'Si+3', 'O+5', 'C+2']
true_Q =18

outpath = '/home/vikram/cloudy_run/figures/2DLLS'
model_path  = '/home/vikram/cloudy_run/metal_NH19'
outfile = outpath + '/NH19_metal_2D.fits'
Example #35
    if len(t) == 0:
        continue
    if args.min_good_time is not None:
        tgti = Table.read(fn, hdu='gti')
        good_time = np.sum(tgti['STOP'] - tgti['START'])
        if good_time < args.min_good_time:
            print('Ignoring file {0} with good time {1:0.2f}.'.format(
                fn, good_time))
            continue
    tlist.append(t)

log.info('Concatenating files')
if len(tlist) == 1:
    etable = tlist[0]
else:
    etable = vstack(tlist, metadata_conflicts='silent')
del tlist

m = args.maxharm
ts_func = cached_zm if args.ztest else cached_hm
phasesinitial = etable['PULSE_PHASE'].astype(np.float32)
hbest = z2m(phasesinitial, m=m)[-1] if args.ztest else hm(phasesinitial, m=m)
eminbest = 0.0
emaxbest = 100.0
ts_name = "Ztest" if args.ztest else "Htest"
if args.ztest:
    print("Initial {0} = {1}".format(ts_name, np.round(hbest, 3)))
else:
    print("Initial {0} = {1} ({2} sigma)".format(ts_name, np.round(hbest, 3),
                                                 np.round(h2sig(hbest), 3)))
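
The metadata_conflicts='silent' option used above suppresses the warning astropy would otherwise emit when stacked tables carry conflicting meta values; the value from the later table wins. A small sketch:

from astropy.table import Table, vstack

t1 = Table({'PULSE_PHASE': [0.12, 0.57]}, meta={'OBS_ID': '1001'})
t2 = Table({'PULSE_PHASE': [0.88]}, meta={'OBS_ID': '1002'})
etable = vstack([t1, t2], metadata_conflicts='silent')
print(etable.meta['OBS_ID'])  # '1002', taken from the last table without warning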
Example #36
    def do_photometry(self, image, init_guesses=None):
        """
        Perform PSF photometry in ``image``.

        This method assumes that ``psf_model`` has centroids and flux
        parameters which will be fitted to the data provided in
        ``image``. A compound model, in fact a sum of ``psf_model``,
        will be fitted to groups of stars automatically identified by
        ``group_maker``. Also, ``image`` is not assumed to be background
        subtracted.  If ``init_guesses`` are not ``None`` then this method
        uses ``init_guesses`` as initial guesses for the centroids. If
        the centroid positions are set as ``fixed`` in the PSF model
        ``psf_model``, then the optimizer will only consider the flux as
        a variable.

        Parameters
        ----------
        image : 2D array-like, `~astropy.io.fits.ImageHDU`, `~astropy.io.fits.HDUList`
            Image to perform photometry.
        init_guesses : `~astropy.table.Table`
            Table which contains the initial guesses (estimates) for the set
            of parameters. Columns 'x_0' and 'y_0' which represent the
            positions (in pixel coordinates) for each object must be present.
            'flux_0' can also be provided to set initial fluxes.
            If 'flux_0' is not provided, aperture photometry is used to
            estimate initial values for the fluxes. Additional columns of the
            form '<parametername>_0' will be used to set the initial guess for
            any parameters of the ``psf_model`` model that are not fixed.

        Returns
        -------
        output_table : `~astropy.table.Table` or None
            Table with the photometry results, i.e., centroids and
            fluxes estimations and the initial estimates used to start
            the fitting process.
            None is returned if no sources are found in ``image``.
        """

        if init_guesses is not None:
            table = super(IterativelySubtractedPSFPhotometry,
                          self).do_photometry(image, init_guesses)
            table['iter_detected'] = np.ones(table['x_fit'].shape,
                                             dtype=np.int32)

            # n_start = 2 because it starts in the second iteration
            # since the first iteration is above
            output_table = self._do_photometry(init_guesses.colnames,
                                               n_start=2)
            output_table = vstack([table, output_table])
        else:
            if self.bkg_estimator is not None:
                self._residual_image = image - self.bkg_estimator(image)

            if self.aperture_radius is None:
                if hasattr(self.psf_model, 'fwhm'):
                    self.aperture_radius = self.psf_model.fwhm.value
                elif hasattr(self.psf_model, 'sigma'):
                    self.aperture_radius = (self.psf_model.sigma.value *
                                            gaussian_sigma_to_fwhm)

            output_table = self._do_photometry(['x_0', 'y_0', 'flux_0'])
        return output_table
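
Stripped of the photometry machinery, the two branches above reduce to tagging each iteration's results and stacking them. A sketch with invented fit values:

import numpy as np
from astropy.table import Table, vstack

first = Table({'x_fit': [10.2, 33.7], 'y_fit': [5.1, 8.9]})
first['iter_detected'] = np.ones(len(first), dtype=np.int32)
second = Table({'x_fit': [52.0], 'y_fit': [47.3]})
second['iter_detected'] = 2 * np.ones(len(second), dtype=np.int32)
output_table = vstack([first, second])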
Example #37
    def from_horizons(cls,
                      targetids,
                      id_type='smallbody',
                      epochs=None,
                      center='500@10',
                      **kwargs):
        """Load target orbital elements from
        `JPL Horizons <https://ssd.jpl.nasa.gov/horizons.cgi>`_ using
        `astroquery.jplhorizons.HorizonsClass.elements`

        Parameters
        ----------
        targetids : str or iterable of str
            Target identifier, i.e., a number, name, designation, or JPL
            Horizons record number, for one or more targets.
        id_type : str, optional
            The nature of ``targetids`` provided; possible values are
            ``'smallbody'`` (asteroid or comet), ``'majorbody'`` (planet or
            satellite), ``'designation'`` (asteroid or comet designation),
            ``'name'`` (asteroid or comet name), ``'asteroid_name'``,
            ``'comet_name'``, ``'id'`` (Horizons id).
            Default: ``'smallbody'``
        epochs : `~astropy.time.Time` or dict, optional
            Epochs of elements to be queried; requires a
            `~astropy.time.Time` object with a single or multiple epochs. A
            dictionary including keywords ``start`` and ``stop``, as well
            as either ``step`` or ``number``, can be used to generate a range
            of epochs. ``start`` and ``stop`` have to be
            `~astropy.time.Time` objects (see :ref:`epochs`).
            If ``step`` is provided, a range
            of epochs will be queried starting at ``start`` and ending at
            ``stop`` in steps of ``step``; ``step`` has to be provided as
            a `~astropy.units.Quantity` object with integer value and a
            unit of either minutes, hours, days, or years. If
            ``number`` is provided as an integer, the
            interval defined by
            ``start`` and ``stop`` is split into ``number`` equidistant
            intervals. If ``None`` is
            provided, current date and time are
            used. All epochs should be provided in TDB; if not, they will be
            converted to TDB and a `~sbpy.data.TimeScaleWarning` will be
            raised.  Default: ``None``
        center : str, optional, default ``'500@10'`` (center of the Sun)
            Elements will be provided relative to this position.
        **kwargs : optional
            Arguments that will be provided to
            `astroquery.jplhorizons.HorizonsClass.elements`.

        Notes
        -----
        * For detailed explanations of the queried fields, refer to
          `astroquery.jplhorizons.HorizonsClass.elements` and the
          `JPL Horizons documentation <https://ssd.jpl.nasa.gov/?horizons_doc>`_.
        * By default, elements are provided in the J2000.0 reference
          system and relative to the ecliptic and mean equinox of the
          reference epoch. Different settings can be chosen using
          additional keyword arguments as used by
          `astroquery.jplhorizons.HorizonsClass.elements`.

        Returns
        -------
        `~Orbit` object

        Examples
        --------
        >>> from sbpy.data import Orbit
        >>> from astropy.time import Time
        >>> epoch = Time('2018-05-14', scale='tdb')
        >>> eph = Orbit.from_horizons('Ceres', epochs=epoch)  # doctest: +REMOTE_DATA
        """

        # modify epoch input to make it work with astroquery.jplhorizons
        # maybe this stuff should really go into that module....
        if epochs is None:
            epochs = [Time.now().tdb.jd]
        elif isinstance(epochs, Time):
            if epochs.scale != 'tdb':
                warn(('converting {} epochs to tdb for use in '
                      'astroquery.jplhorizons').format(epochs.scale),
                     TimeScaleWarning)
            epochs = epochs.tdb.jd
        elif isinstance(epochs, dict):
            if 'start' in epochs and 'stop' in epochs and 'number' in epochs:
                epochs['step'] = epochs['number'] * u.dimensionless_unscaled
            # convert to tdb and iso for astroquery.jplhorizons
            epochs['start'] = epochs['start'].tdb.iso
            epochs['stop'] = epochs['stop'].tdb.iso
            if 'step' in epochs:
                if epochs['step'].unit != u.dimensionless_unscaled:
                    epochs['step'] = '{:d}{:s}'.format(
                        int(epochs['step'].value), {
                            u.minute: 'm',
                            u.hour: 'h',
                            u.d: 'd',
                            u.year: 'y'
                        }[epochs['step'].unit])
                else:
                    epochs['step'] = '{:d}'.format(
                        int(epochs['step'].value - 1))

        # if targetids is a list, run separate Horizons queries and append
        if not isinstance(targetids, (list, ndarray, tuple)):
            targetids = [targetids]

        # append elements table for each targetid
        all_elem = None
        for targetid in targetids:

            # load elements using astroquery.jplhorizons
            obj = Horizons(id=targetid,
                           id_type=id_type,
                           location=center,
                           epochs=epochs)
            try:
                elem = obj.elements(**kwargs)
            except ValueError as e:
                raise QueryError(
                    ('Error raised by astroquery.jplhorizons: {:s}\n'
                     'The following query was attempted: {:s}').format(
                         str(e), obj.uri))

            # workaround for current version of astroquery to make
            # column units compatible with astropy.table.QTable
            # should really change '---' units to None in
            # astroquery.jplhorizons.__init__.py
            for column_name in elem.columns:
                if elem[column_name].unit == '---':
                    elem[column_name].unit = None

            if all_elem is None:
                all_elem = elem
            else:
                all_elem = vstack([all_elem, elem])

        # turn epochs into astropy.time.Time and apply timescale
        # https://ssd.jpl.nasa.gov/?horizons_doc
        all_elem['epoch'] = Time(all_elem['datetime_jd'],
                                 format='jd',
                                 scale='tdb')
        all_elem.remove_column('datetime_jd')
        all_elem.remove_column('datetime_str')

        return cls.from_table(all_elem)
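
The per-target loop uses a common vstack idiom: start from None, adopt the first table, then stack each subsequent one. In isolation, with stand-in tables instead of live Horizons queries:

from astropy.table import Table, vstack

all_elem = None
for targetid in ('Ceres', 'Pallas'):  # hypothetical targets
    elem = Table({'targetname': [targetid], 'a': [2.77]})  # stand-in for obj.elements()
    all_elem = elem if all_elem is None else vstack([all_elem, elem])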
Example #38
def save_scan(times,
              ra,
              dec,
              channels,
              filename='out.fits',
              other_columns=None,
              scan_type=None,
              src_ra=None,
              src_dec=None,
              srcname='Dummy',
              counts_to_K=0.03):
    """Save a simulated scan in fitszilla format.

    Parameters
    ----------
    times : iterable
        times corresponding to each bin center, in seconds
    ra : iterable
        RA corresponding to each bin center
    dec : iterable
        Dec corresponding to each bin center
    channels : {'Ch0': array([...]), 'Ch1': array([...]), ...}
        Dictionary containing the count array. Keys represent the name of the
        channel
    filename : str
        Output file name
    srcname : str
        Name of the source
    counts_to_K : float, array or dict
        Conversion factor between counts and K. If array, it has to be the same
        length as channels.keys()
    """
    if src_ra is None:
        src_ra = np.mean(ra)
    if src_dec is None:
        src_dec = np.mean(dec)
    # If it's a single value, make it into a list
    if not isinstance(counts_to_K, collections.abc.Iterable):
        counts_to_K = counts_to_K * np.ones(len(list(channels.keys())))
    # If it's a list, make it into a dict
    if not hasattr(counts_to_K, 'keys'):
        counts_to_K = dict([(ch, counts_to_K[i])
                            for i, ch in enumerate(channels.keys())])

    curdir = os.path.abspath(os.path.dirname(__file__))
    template = os.path.abspath(
        os.path.join(curdir, 'data', 'scan_template.fits'))
    lchdulist = fits.open(template)
    datahdu = lchdulist['DATA TABLE']
    temphdu = lchdulist['ANTENNA TEMP TABLE']
    lchdulist[0].header['SOURCE'] = "Dummy"
    lchdulist[0].header['ANTENNA'] = "SRT"
    lchdulist[0].header['HIERARCH RIGHTASCENSION'] = np.radians(src_ra)
    lchdulist[0].header['HIERARCH DECLINATION'] = np.radians(src_dec)
    if scan_type is not None:
        lchdulist[0].header['HIERARCH SubScanType'] = scan_type

    data_table_data = Table(datahdu.data)

    obstimes = Time((times / 86400 + 57000) * u.day, format='mjd', scale='utc')

    coords = SkyCoord(ra,
                      dec,
                      unit=u.degree,
                      location=locations['srt'],
                      obstime=obstimes)

    altaz = coords.altaz
    el = altaz.alt.rad
    az = altaz.az.rad
    newtable = Table(
        names=['time', 'raj2000', 'decj2000', "el", "az"],
        data=[obstimes.value,
              np.radians(ra),
              np.radians(dec), el, az])

    for ch in channels.keys():
        newtable[ch] = channels[ch]
    if other_columns is None:
        other_columns = {}
    for col in other_columns.keys():
        newtable[col] = other_columns[col]

    data_table_data = vstack([data_table_data, newtable])

    nrows = len(data_table_data)

    hdu = fits.BinTableHDU.from_columns(datahdu.data.columns, nrows=nrows)
    for colname in datahdu.data.columns.names:
        hdu.data[colname][:] = data_table_data[colname]

    datahdu.data = hdu.data

    temptable = Table()
    for ch in channels.keys():
        temptable[ch] = newtable[ch] * counts_to_K[ch]

    thdu = fits.BinTableHDU.from_columns(temphdu.data.columns, nrows=nrows)
    for colname in temphdu.data.columns.names:
        thdu.data[colname][:] = temptable[colname]

    temphdu.data = thdu.data

    lchdulist[0].header['SOURCE'] = srcname
    lchdulist.writeto(filename, overwrite=True)
    lchdulist.close()
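
The template-extension trick above, in a compact sketch: read the template rows into a Table, vstack the new rows, rebuild the HDU with from_columns(..., nrows=...) and copy each column back. The template here is a hypothetical stand-in for the fitszilla file:

from astropy.io import fits
from astropy.table import Table, vstack

template = fits.BinTableHDU(Table({'time': [0.0], 'Ch0': [1.0]}))  # hypothetical template
old_rows = Table(template.data)
new_rows = Table({'time': [1.0, 2.0], 'Ch0': [1.5, 1.7]})
combined = vstack([old_rows, new_rows])

hdu = fits.BinTableHDU.from_columns(template.data.columns, nrows=len(combined))
for colname in template.data.columns.names:
    hdu.data[colname][:] = combined[colname]
template.data = hdu.data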
Example #39
idx, d2d, _ = match_coordinates_sky(xmmcoord, bestmfcoord)
mask = d2d <= 5 * u.arcsec  #make sure match is within 5 arcsec (like in topcat)
idx = idx[mask]
xmmmf = bestmf[idx]

## match with chandra
print('Matching Chandra')
chan = Table.read('UDS_catalogues/chandra_catalogue.fits')
chan['RA'].unit = u.deg
chan['Dec'].unit = u.deg
chancoord = SkyCoord(chan['RA'], chan['Dec'])
idx, d2d, _ = match_coordinates_sky(chancoord, bestmfcoord)
mask = d2d <= 1 * u.arcsec  #make sure match is within 1 arcsec (like in topcat)
idx = idx[mask]
chanmf = bestmf[idx]

# combine chandra and xmm
print('Joining xray table')
xraymf = vstack([chanmf, xmmmf])
#%%
# boolean whether a source is seen in x-rays
xray = np.isin(bestmf['NUMBER_05B'], xraymf['NUMBER_05B'])
xraycol = Column(xray, 'X-ray')
bestmf.add_column(xraycol)

#%% Save the tables
extra = '_extra_clean'
semcom.write('mag_flux_tables/J/mag_flux_table_J' + extra + '.fits')
bestmf.write('mag_flux_tables/J/mag_flux_table_best_J' + extra + '.fits')
starsmf.write('mag_flux_tables/J/stars_mag_flux_table_J' + extra + '.fits')
xraymf.write('mag_flux_tables/J/xray_mag_flux_table_best_J' + extra + '.fits')
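
The matching step used for both XMM and Chandra, self-contained: match_coordinates_sky returns, for each source in the first catalogue, the index of its nearest neighbour in the second, and the separations are then thresholded (positions below are made up):

import astropy.units as u
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy.table import Table

bestmf = Table({'RA': [34.20, 34.50], 'Dec': [-5.10, -5.30], 'NUMBER_05B': [1, 2]})
bestmfcoord = SkyCoord(bestmf['RA'], bestmf['Dec'], unit=u.deg)
xmmcoord = SkyCoord([34.2001, 10.0], [-5.1001, 2.0], unit=u.deg)
idx, d2d, _ = match_coordinates_sky(xmmcoord, bestmfcoord)
xmmmf = bestmf[idx[d2d <= 5 * u.arcsec]]  # keep matches within 5 arcsec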
Example #40
#~ plt.show()

# =============================================================================
# Apartado B
# =============================================================================

d10 = ascii.read('../ej1b/L_10_Tvar.dat')
d20 = ascii.read('../ej1b/L_20_Tvar.dat')
d40 = ascii.read('../ej1b/L_40_Tvar.dat')

dneg10 = ascii.read('../neg_modL_10_Tvar.dat')
dneg20 = ascii.read('../neg_modL_20_Tvar.dat')
dneg40 = ascii.read('../neg_modL_40_Tvar.dat')

sd10 = vstack([d10, dneg10])
sd20 = vstack([d20, dneg20])
sd40 = vstack([d40, dneg40])

plt.plot(sd10['T'], sd10['cv'], '*', label=r'$10\times10$')
plt.plot(sd20['T'], sd20['cv'], 'o', label=r'$20\times20$')
plt.plot(sd40['T'], sd40['cv'], '.', label=r'$40\times40$')
plt.xlabel(r'$T/T_c$')
plt.ylabel(r'$C_v$')
plt.xlim(0.3, 2.8)
plt.ylim(0., 6)
plt.legend(loc='best')
plt.savefig('cv.png')
plt.close()

plt.plot(sd10['T'], sd10['e'], '*', label=r'$10\times10$')
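
The stacking step above in isolation: two whitespace-separated files with the same columns are read with ascii.read and concatenated row-wise (paths as in the snippet; the files are assumed to share the columns 'T', 'cv' and 'e'):

from astropy.io import ascii
from astropy.table import vstack

d10 = ascii.read('../ej1b/L_10_Tvar.dat')
dneg10 = ascii.read('../neg_modL_10_Tvar.dat')
sd10 = vstack([d10, dneg10])  # rows from both runs; columns must agree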
Example #41
    while True:
        startra, startdec = ra, dec
        tcopy = lt
        tcopy['dist'] = np.sqrt((np.cos(dec * np.pi / 180.0) *
                                 (tcopy['RA'] - ra))**2.0 +
                                (tcopy['DEC'] - dec)**2.0) * 3600.0
        tcopy = tcopy[tcopy['dist'] < 180]
        print('Iter', iter, 'found', len(tcopy), 'neighbours')

        # make sure the original source is in there
        for nr in tcopy:
            if sourcename == nr['Source_Name']:
                break
        else:
            if 'Maj' in r.columns:
                tcopy = vstack((tcopy, r))

        ra = np.mean(tcopy['RA'])
        dec = np.mean(tcopy['DEC'])

        if startra == ra and startdec == dec:
            break
        iter += 1
        if iter == 10:
            break

    # now find the bounding box of the resulting collection
    ra, dec, size = find_bbox(tcopy)

    if np.isnan(size):
        ra = r['RA']
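
The for/else block above re-appends the original source when the distance cut drops it. A sketch of that pattern; note that slicing with a list (lt[[0]]) yields a one-row Table, which vstack accepts:

from astropy.table import Table, vstack

lt = Table({'RA': [180.00, 180.01], 'DEC': [45.00, 45.01], 'Source_Name': ['A', 'B']})
r = lt[[0]]  # the source of interest as a one-row Table
tcopy = lt[1:]  # pretend the distance cut removed it
if r['Source_Name'][0] not in tcopy['Source_Name']:
    tcopy = vstack((tcopy, r))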
Example #42
outfile = dirmain + '/' + dirout + '/' + fname
if not os.path.isdir(dirmain + '/' + dirout):
    os.makedirs(dirmain + '/' + dirout)

tot_data = None
f = h5py.File(fi, 'r')
print('try loading', fi, len(f.keys()))
n_lc = 0
for i, keyb in enumerate(f.keys()):
    tab = Table.read(fi, path=keyb)
    #print(i,tab)
    n_lc += len(tab)
    if tot_data is None:
        tot_data = tab
    else:
        tot_data = vstack([tot_data, tab])
print('number of lc', n_lc)

#print(tot_data.colnames)
"""
for name in tot_data.colnames:
    if 'Cov' in name:
        print name
"""
param_names = ['x0', 'x1', 'c']
outvars = [
    'mb_recalc', 'salt2.CovColormb', 'salt2.CovX1mb', 'salt2.CovX0mb',
    'salt2.Covmbmb'
]

snutils = SN_Utils()
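
Stacking inside the loop, as above, re-copies the accumulated table on every iteration. Where memory allows, collecting the per-key tables in a list and calling vstack once scales better; a sketch with a hypothetical file of the same layout:

import h5py
from astropy.table import Table, vstack

fi = 'light_curves.hdf5'  # hypothetical file with one table per HDF5 path
with h5py.File(fi, 'r') as f:
    keys = list(f.keys())
tot_data = vstack([Table.read(fi, path=key) for key in keys])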
Example #43
    def from_mpc(cls, targetids, id_type=None, target_type=None, **kwargs):
        """Load latest orbital elements from the
        `Minor Planet Center <http://minorplanetcenter.net>`_.

        Parameters
        ----------
        targetids : str or iterable of str
            Target identifier, resolvable by the Minor Planet
            Ephemeris Service. If multiple targetids are provided in a list,
            the same format (number, name, or designation) must be used.

        id_type : str, optional
            ``'name'``, ``'number'``, ``'designation'``, or ``None`` to
            indicate
            type of identifiers provided in ``targetids``. If ``None``,
            automatic identification is attempted using
            `~sbpy.data.names`. Default: ``None``

        target_type : str, optional
            ``'asteroid'``, ``'comet'``, or ``None`` to indicate
            target type. If ``None``, automatic identification is
            attempted using
            `~sbpy.data.names`. Default: ``None``

        **kwargs : optional
            Additional keyword arguments are passed to
            `~astroquery.mpc.MPC.query_object`

        Returns
        -------
        `~Orbit` object
            The resulting object will be populated with most columns as
            defined in
            `~astroquery.mpc.MPC.query_object`; refer
            to that document on information on how to modify the list
            of queried parameters.


        Examples
        --------
        >>> from sbpy.data import Orbit
        >>> orb = Orbit.from_mpc('Ceres') # doctest: +REMOTE_DATA
        >>> orb  # doctest: +SKIP
        <QTable length=1>
         absmag    Q      arc      w     ...     a        Tj   moid_uranus moid_venus
          mag      AU      d      deg    ...     AU                 AU         AU
        float64 float64 float64 float64  ...  float64  float64   float64    float64
        ------- ------- ------- -------- ... --------- ------- ----------- ----------
           3.34    2.98 79653.0 73.59764 ... 2.7691652     3.3     15.6642    1.84632
        """

        from ..data import Names

        # if targetids is a list, run separate MPC queries and append
        if not isinstance(targetids, (list, ndarray, tuple)):
            targetids = [targetids]

        for targetid in targetids:
            if target_type is None:
                target_type = Names.asteroid_or_comet(targetid)
            if id_type is None:
                if target_type == 'asteroid':
                    ident = Names.parse_asteroid(targetid)
                elif target_type == 'comet':
                    ident = Names.parse_comet(targetid)
                if 'name' in ident:
                    id_type = 'name'
                elif 'desig' in ident:
                    id_type = 'designation'
                elif 'number' in ident:
                    id_type = 'number'

        # append ephemerides table for each targetid
        all_elem = None
        for targetid in targetids:

            # get elements
            kwargs[id_type] = targetid
            e = MPC.query_object(target_type, **kwargs)

            # parse results from MPC.query_object
            results = {}
            for key, val in e[0].items():
                # skip if key not in conf.mpc_orbit_fields
                if key not in conf.mpc_orbit_fields:
                    continue

                fieldname, fieldunit = conf.mpc_orbit_fields[key]
                # try to convert to float
                try:
                    val = float(val)
                except (ValueError, TypeError):
                    pass

                if fieldname == 'mpc_orbit_type':
                    results[fieldname] = [{
                        0: 'Unclassified',
                        1: 'Atira',
                        2: 'Aten',
                        3: 'Apollo',
                        4: 'Amor',
                        5: 'Mars Crosser',
                        6: 'Hungaria',
                        7: 'Phocaeas',
                        8: 'Hilda',
                        9: 'Jupiter Trojan',
                        10: 'Distant Object'
                    }[int(val)]]
                elif fieldunit is None:
                    results[fieldname] = [val]
                elif fieldunit == 'time_jd_utc':
                    results[fieldname] = Time([val], scale='utc', format='jd')
                else:
                    if val is None:
                        continue
                    results[fieldname] = [val] * u.Unit(fieldunit)

            if all_elem is None:
                all_elem = QTable(results)
            else:
                all_elem = vstack([all_elem, QTable(results)])

        return cls.from_table(all_elem)
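
Each parsed result above becomes a QTable whose columns carry units; recent astropy versions can vstack Quantity columns directly. A toy version of the final stacking:

import astropy.units as u
from astropy.table import QTable, vstack

r1 = {'absmag': [3.34] * u.mag, 'semimajoraxis': [2.77] * u.au}  # stand-in parsed result
r2 = {'absmag': [7.10] * u.mag, 'semimajoraxis': [2.64] * u.au}
all_elem = vstack([QTable(r1), QTable(r2)])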
Example #44
    def merge(self,
              new_ll,
              thresh=None,
              loggf_thresh=None,
              raise_exception=True,
              skip_exactly_equal_lines=False,
              skip_equal_loggf=False,
              override_current=False,
              in_place=True,
              add_new_lines=True,
              ignore_conflicts=False):
        """
        new_ll: 
            new LineList object to merge into this one

        thresh: 
            threshold for wavelength check when matching lines
            Defaults to self.default_thresh (0.1)

        loggf_thresh: 
            threshold for loggf check when finding identical lines
            Defaults to self.default_loggf_thresh (0.01)

        raise_exception:
            If True (default), finds all the conflicts and raises LineListConflict
              Note: if in_place == True, then it merges new lines BEFORE raising the error
            If False, uses self.pick_best_line() to pick a line to overwrite

        skip_exactly_equal_lines:
            If True, skips lines that have equal hashes during the merge
            If False (default), raises exception for duplicate lines

        skip_equal_loggf:
            If True, skips lines that are almost exactly equal during the merge
            If False (default), raises exception for duplicate lines

        override_current: 
            If True, uses new lines whenever duplicate lines are found.
            If False (default), keep current lines whenever duplicate lines are found.
            Ignored if raise_exception == True
        
        in_place:
            If True (default), merge new lines into this object. It will do so BEFORE throwing any LineListConflict exceptions!
            If False, return a new LineList
        
        add_new_lines:
            If True (default), add new lines when merging.
            If False, do not add new lines. This is to replace lines from a list without adding them.

        ignore_conflicts:
            If True, merge the linelists without checking for conflicts
            If False (default), check for conflicts during merge

        """
        if thresh is None: thresh = self.default_thresh
        if loggf_thresh is None: loggf_thresh = self.default_loggf_thresh
        if len(self) == 0:
            if not in_place:
                return new_ll.copy()
            else:
                n_cols = len(new_ll.colnames)
                names = new_ll.colnames
                dtype = [None] * n_cols
                self._init_indices = self._init_indices and new_ll._copy_indices
                self._init_from_table(new_ll, names, dtype, n_cols, True)
                return None

        if ignore_conflicts:
            if self.verbose:
                print("Ignoring conflicts: adding {} lines".format(
                    len(new_ll)))
            if not in_place:
                return table.vstack([self, new_ll])
            else:
                #combined = table.vstack([self, new_ll])
                #names = combined.colnames
                #dtype = [None] * n_cols
                #self._init_indices = self._init_indices and combined._copy_indices
                #self._init_from_table(combined, names, dtype, n_cols, True)
                #return None
                raise NotImplementedError
        num_in_list = 0
        num_with_multiple_conflicts = 0
        lines_to_add = []

        for j, new_line in enumerate(new_ll):
            index = self.find_match(new_line, thresh)
            if index == -1:  # New Line
                lines_to_add.append(new_line)
            elif raise_exception:  # Record all conflicts later
                pass
            else:  # use self.pick_best_line to find best line
                if index < -1:
                    num_with_multiple_conflicts += 1
                    index = self.pick_best_line(new_line, thresh)
                    # index < 0 is the convention that you should just skip the line rather than overwriting
                    if index < 0: continue
                    if override_current:
                        self[index] = new_line
                elif index >= 0:
                    num_in_list += 1
                    if override_current:
                        self[index] = new_line
        num_lines_added = len(lines_to_add)
        if add_new_lines and len(lines_to_add) > 0:
            if in_place:
                for line in lines_to_add:
                    self.add_row(line)
            else:
                new_lines = Table(rows=lines_to_add,
                                  names=lines_to_add[0].colnames)
                old_lines = self.copy()
                # During the vstack, astropy creates an empty LineList and warns
                new_data = table.vstack([old_lines, new_lines])
        else:
            if not in_place:
                new_data = self.copy()

        # Note: if in_place == True, then it merges new lines BEFORE raising the exception
        if raise_exception:
            conflicts1, conflicts2 = self.identify_conflicts(
                self,
                new_ll,
                skip_exactly_equal_lines=skip_exactly_equal_lines,
                skip_equal_loggf=skip_equal_loggf,
                dwl_thresh=thresh,
                dgf_thresh=loggf_thresh)
            if len(conflicts1) > 0:
                raise LineListConflict(conflicts1, conflicts2)
        if self.verbose:
            print("Num lines added: {}".format(num_lines_added))
            print("Num lines {}: {}".format(
                'replaced' if override_current else 'ignored', num_in_list))
            print("Num lines with multiple matches: {}".format(
                num_with_multiple_conflicts))

        if not in_place:
            return LineList(new_data)
        else:
            return None
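
A note on the ignore_conflicts path: table.vstack defaults to join_type='outer', padding columns missing from one table with masked values; join_type='exact' raises instead if the column sets differ. A sketch:

from astropy import table

t1 = table.Table({'wavelength': [5000.0], 'loggf': [-1.2]})
t2 = table.Table({'wavelength': [5001.0], 'loggf': [-0.8]})
merged = table.vstack([t1, t2], join_type='exact')  # require identical columns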
Example #45
    def nstar(self, image, star_groups):
        """
        Fit, as appropriate, a compound or single model to the given
        ``star_groups``. Groups are fitted sequentially from the
        smallest to the biggest. In each iteration, ``image`` is
        subtracted by the previous fitted group.

        Parameters
        ----------
        image : numpy.ndarray
            Background-subtracted image.
        star_groups : `~astropy.table.Table`
            This table must contain the following columns: ``id``,
            ``group_id``, ``x_0``, ``y_0``, ``flux_0``.  ``x_0`` and
            ``y_0`` are initial estimates of the centroids and
            ``flux_0`` is an initial estimate of the flux. Additionally,
            columns named as ``<param_name>_0`` are required if any other
            parameter in the psf model is free (i.e., the ``fixed``
            attribute of that parameter is ``False``).

        Returns
        -------
        result_tab : `~astropy.table.Table`
            Astropy table that contains photometry results.
        image : numpy.ndarray
            Residual image.
        """

        result_tab = Table()

        for param_tab_name in self._pars_to_output.keys():
            result_tab.add_column(Column(name=param_tab_name))

        y, x = np.indices(image.shape)

        star_groups = star_groups.group_by('group_id')
        for n in range(len(star_groups.groups)):
            group_psf = get_grouped_psf_model(self.psf_model, star_groups.groups[n],
                                              self._pars_to_set)
            usepixel = np.zeros_like(image, dtype=bool)

            for row in star_groups.groups[n]:
                usepixel[overlap_slices(large_array_shape=image.shape,
                                        small_array_shape=self.fitshape,
                                        position=(row['y_0'], row['x_0']),
                                        mode='trim')[0]] = True

            fit_model = self.fitter(group_psf, x[usepixel], y[usepixel],
                                    image[usepixel])
            param_table = self._model_params2table(fit_model,
                                                   len(star_groups.groups[n]))
            result_tab = vstack([result_tab, param_table])

            try:
                from astropy.nddata.utils import NoOverlapError
            except ImportError:
                raise ImportError("astropy 1.1 or greater is required in "
                                  "order to use this class.")
            # do not subtract if the fitting did not go well
            try:
                image = subtract_psf(image, self.psf_model, param_table,
                                     subshape=self.fitshape)
            except NoOverlapError:
                pass

        return result_tab, image
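
The result_tab = Table() seed above works because vstack accepts a zero-row table with named columns, and the first stacked group settles the dtypes. The pattern in isolation:

from astropy.table import Column, Table, vstack

result_tab = Table()
for name in ('x_fit', 'y_fit', 'flux_fit'):
    result_tab.add_column(Column(name=name))  # zero-row column
group_fit = Table({'x_fit': [10.3], 'y_fit': [4.9], 'flux_fit': [120.0]})
result_tab = vstack([result_tab, group_fit])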
Example #46
def main(args):

    # Set up the logger
    if args.verbose:
        log = get_logger(DEBUG)
    else:
        log = get_logger()

    # Make sure all necessary environment variables are set
    setup_envs()

    # Initialize random number generator to use.
    np.random.seed(args.seed)
    random_state = np.random.RandomState(args.seed)

    # Derive spectrograph number from nstart if needed
    if args.spectrograph is None:
        args.spectrograph = args.nstart // args.n_fibers

    # Read fibermapfile to get object type, night and expid
    fibermap, objtype, night, expid = get_fibermap(args.fibermap,
                                                   log=log,
                                                   nspec=args.nspec)

    # Initialize the spectral simulator
    log.info("Initializing SpecSim with config {}".format(args.config))
    lvmparams = load_lvmparams(config=args.config, telescope=args.telescope)
    qsim = get_simulator(args.config, num_fibers=1, params=lvmparams)

    if args.simspec:
        # Read the input file
        log.info('Reading input file {}'.format(args.simspec))
        simspec = lvmsim.io.read_simspec(args.simspec)
        nspec = simspec.nspec
        if simspec.flavor == 'arc':
            # - TODO: do we need quickgen to support arcs?  For full pipeline
            # - arcs are used to measure PSF but aren't extracted except for
            # - debugging.
            # - TODO: if we do need arcs, this needs to be redone.
            # - conversion from phot to flux doesn't include throughput,
            # - and arc lines are rebinned to nearest 0.2 A.

            # Create full wavelength and flux arrays for arc exposure
            wave_b = np.array(simspec.wave['b'])
            wave_r = np.array(simspec.wave['r'])
            wave_z = np.array(simspec.wave['z'])
            phot_b = np.array(simspec.phot['b'][0])
            phot_r = np.array(simspec.phot['r'][0])
            phot_z = np.array(simspec.phot['z'][0])
            sim_wave = np.concatenate((wave_b, wave_r, wave_z))
            sim_phot = np.concatenate((phot_b, phot_r, phot_z))
            wavelengths = np.arange(3533., 9913.1, 0.2)
            phot = np.zeros(len(wavelengths))
            for i in range(len(sim_wave)):
                wavelength = sim_wave[i]
                flux_index = np.argmin(abs(wavelength - wavelengths))
                phot[flux_index] = sim_phot[i]
            # Convert photons to flux: following specter conversion method
            dw = np.gradient(wavelengths)
            exptime = 5.  # typical BOSS exposure time in s
            fibarea = const.pi * (1.07e-2 /
                                  2)**2  # cross-sectional fiber area in cm^2
            hc = 1.e17 * const.h * const.c  # convert to erg A
            spectra = (hc * exptime * fibarea * dw * phot) / wavelengths
        else:
            wavelengths = simspec.wave['brz']
            spectra = simspec.flux
        if nspec < args.nspec:
            log.info("Only {} spectra in input file".format(nspec))
            args.nspec = nspec

    else:
        # Initialize the output truth table.
        spectra = []
        wavelengths = qsim.source.wavelength_out.to(u.Angstrom).value
        npix = len(wavelengths)
        truth = dict()
        meta = Table()
        truth['OBJTYPE'] = np.zeros(args.nspec, dtype=(str, 10))
        truth['FLUX'] = np.zeros((args.nspec, npix))
        truth['WAVE'] = wavelengths
        jj = list()

        for thisobj in set(true_objtype):
            ii = np.where(true_objtype == thisobj)[0]
            nobj = len(ii)
            truth['OBJTYPE'][ii] = thisobj
            log.info('Generating {} template'.format(thisobj))

            # Generate the templates
            if thisobj == 'ELG':
                elg = lvmsim.templates.ELG(wave=wavelengths,
                                           add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = elg.make_templates(
                    nmodel=nobj,
                    seed=args.seed,
                    zrange=args.zrange_elg,
                    sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'LRG':
                lrg = lvmsim.templates.LRG(wave=wavelengths,
                                           add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = lrg.make_templates(
                    nmodel=nobj,
                    seed=args.seed,
                    zrange=args.zrange_lrg,
                    sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'QSO':
                qso = lvmsim.templates.QSO(wave=wavelengths)
                flux, tmpwave, meta1 = qso.make_templates(
                    nmodel=nobj, seed=args.seed, zrange=args.zrange_qso)
            elif thisobj == 'BGS':
                bgs = lvmsim.templates.BGS(wave=wavelengths,
                                           add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = bgs.make_templates(
                    nmodel=nobj,
                    seed=args.seed,
                    zrange=args.zrange_bgs,
                    rmagrange=args.rmagrange_bgs,
                    sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'STD':
                fstd = lvmsim.templates.FSTD(wave=wavelengths)
                flux, tmpwave, meta1 = fstd.make_templates(nmodel=nobj,
                                                           seed=args.seed)
            elif thisobj == 'QSO_BAD':  # use STAR template no color cuts
                star = lvmsim.templates.STAR(wave=wavelengths)
                flux, tmpwave, meta1 = star.make_templates(nmodel=nobj,
                                                           seed=args.seed)
            elif thisobj == 'MWS_STAR' or thisobj == 'MWS':
                mwsstar = lvmsim.templates.MWS_STAR(wave=wavelengths)
                flux, tmpwave, meta1 = mwsstar.make_templates(nmodel=nobj,
                                                              seed=args.seed)
            elif thisobj == 'WD':
                wd = lvmsim.templates.WD(wave=wavelengths)
                flux, tmpwave, meta1 = wd.make_templates(nmodel=nobj,
                                                         seed=args.seed)
            elif thisobj == 'SKY':
                flux = np.zeros((nobj, npix))
                meta1 = Table(dict(REDSHIFT=np.zeros(nobj, dtype=np.float32)))
            elif thisobj == 'TEST':
                flux = np.zeros((args.nspec, npix))
                indx = np.where(wavelengths > 5800.0 - 1E-6)[0][0]
                ref_integrated_flux = 1E-10
                ref_cst_flux_density = 1E-17
                single_line = (np.arange(args.nspec) % 2 == 0).astype(
                    np.float32)
                continuum = (np.arange(args.nspec) % 2 == 1).astype(np.float32)

                for spec in range(args.nspec):
                    flux[spec, indx] = single_line[
                        spec] * ref_integrated_flux / np.gradient(wavelengths)[
                            indx]  # single line
                    flux[spec] += continuum[
                        spec] * ref_cst_flux_density  # flat continuum

                meta1 = Table(
                    dict(REDSHIFT=np.zeros(args.nspec, dtype=np.float32),
                         LINE=wavelengths[indx] *
                         np.ones(args.nspec, dtype=np.float32),
                         LINEFLUX=single_line * ref_integrated_flux,
                         CONSTFLUXDENSITY=continuum * ref_cst_flux_density))
            else:
                log.fatal('Unknown object type {}'.format(thisobj))
                sys.exit(1)

            # Pack it in.
            truth['FLUX'][ii] = flux
            meta = vstack([meta, meta1])
            jj.append(ii.tolist())

            # Sanity check on units; templates currently return ergs, not 1e-17 ergs...
            # assert (thisobj == 'SKY') or (np.max(truth['FLUX']) < 1e-6)

        # Sort the metadata table.
        jj = sum(jj, [])
        meta_new = Table()
        for k in range(args.nspec):
            index = int(np.where(np.array(jj) == k)[0])
            meta_new = vstack([meta_new, meta[index]])
        meta = meta_new

        # Add TARGETID and the true OBJTYPE to the metadata table.
        meta.add_column(
            Column(true_objtype, dtype=(str, 10), name='TRUE_OBJTYPE'))
        meta.add_column(Column(targetids, name='TARGETID'))

        # Rename REDSHIFT -> TRUEZ anticipating later table joins with zbest.Z
        meta.rename_column('REDSHIFT', 'TRUEZ')

    # ---------- end simspec

    # explicitly set location on focal plane if needed to support airmass
    # variations when using specsim v0.5
    if qsim.source.focal_xy is None:
        qsim.source.focal_xy = (u.Quantity(0, 'mm'), u.Quantity(100, 'mm'))

    # Set simulation parameters from the simspec header or lvmparams
    bright_objects = ['bgs', 'mws', 'bright', 'BGS', 'MWS', 'BRIGHT_MIX']
    gray_objects = ['gray', 'grey']
    if args.simspec is None:
        object_type = objtype
        flavor = None
    elif simspec.flavor == 'science':
        object_type = None
        flavor = simspec.header['PROGRAM']
    else:
        object_type = None
        flavor = simspec.flavor
        log.warning(
            'Maybe using an outdated simspec file with flavor={}'.format(
                flavor))

    # Set airmass
    if args.airmass is not None:
        qsim.atmosphere.airmass = args.airmass
    elif args.simspec and 'AIRMASS' in simspec.header:
        qsim.atmosphere.airmass = simspec.header['AIRMASS']
    else:
        qsim.atmosphere.airmass = 1.25  # Science Req. Doc L3.3.2

    # Set site location
    if args.location is not None:
        qsim.observation.observatory = args.location
    else:
        qsim.observation.observatory = 'APO'

    # Set exptime
    if args.exptime is not None:
        qsim.observation.exposure_time = args.exptime * u.s
    elif args.simspec and 'EXPTIME' in simspec.header:
        qsim.observation.exposure_time = simspec.header['EXPTIME'] * u.s
    elif objtype in bright_objects:
        qsim.observation.exposure_time = lvmparams['exptime_bright'] * u.s
    else:
        qsim.observation.exposure_time = lvmparams['exptime_dark'] * u.s

    # Set Moon Phase
    if args.moon_phase is not None:
        qsim.atmosphere.moon.moon_phase = args.moon_phase
    elif args.simspec and 'MOONFRAC' in simspec.header:
        qsim.atmosphere.moon.moon_phase = simspec.header['MOONFRAC']
    elif flavor in bright_objects or object_type in bright_objects:
        qsim.atmosphere.moon.moon_phase = 0.7
    elif flavor in gray_objects:
        qsim.atmosphere.moon.moon_phase = 0.1
    else:
        qsim.atmosphere.moon.moon_phase = 0.5

    # Set Moon Zenith
    if args.moon_zenith is not None:
        qsim.atmosphere.moon.moon_zenith = args.moon_zenith * u.deg
    elif args.simspec and 'MOONALT' in simspec.header:
        qsim.atmosphere.moon.moon_zenith = simspec.header['MOONALT'] * u.deg
    elif flavor in bright_objects or object_type in bright_objects:
        qsim.atmosphere.moon.moon_zenith = 30 * u.deg
    elif flavor in gray_objects:
        qsim.atmosphere.moon.moon_zenith = 80 * u.deg
    else:
        qsim.atmosphere.moon.moon_zenith = 100 * u.deg

    # Set Moon - Object Angle
    if args.moon_angle is not None:
        qsim.atmosphere.moon.separation_angle = args.moon_angle * u.deg
    elif args.simspec and 'MOONSEP' in simspec.header:
        qsim.atmosphere.moon.separation_angle = simspec.header[
            'MOONSEP'] * u.deg
    elif flavor in bright_objects or object_type in bright_objects:
        qsim.atmosphere.moon.separation_angle = 50 * u.deg
    elif flavor in gray_objects:
        qsim.atmosphere.moon.separation_angle = 60 * u.deg
    else:
        qsim.atmosphere.moon.separation_angle = 60 * u.deg

    # Initialize per-camera output arrays that will be saved
    waves, trueflux, noisyflux, obsivar, resolution, sflux = {}, {}, {}, {}, {}, {}

    maxbin = 0
    nmax = args.nspec
    for camera in qsim.instrument.cameras:
        # Lookup this camera's resolution matrix and convert to the sparse format used in lvmspec.
        R = Resolution(camera.get_output_resolution_matrix())
        resolution[camera.name] = np.tile(R.to_fits_array(),
                                          [args.nspec, 1, 1])
        waves[camera.name] = (camera.output_wavelength.to(
            u.Angstrom).value.astype(np.float32))
        nwave = len(waves[camera.name])
        maxbin = max(maxbin, len(waves[camera.name]))
        nobj = np.zeros((nmax, 3, maxbin))  # object photons
        nsky = np.zeros((nmax, 3, maxbin))  # sky photons
        nivar = np.zeros((nmax, 3, maxbin))  # inverse variance (object+sky)
        cframe_observedflux = np.zeros(
            (nmax, 3, maxbin))  # calibrated object flux
        cframe_ivar = np.zeros(
            (nmax, 3, maxbin))  # inverse variance of calibrated object flux
        cframe_rand_noise = np.zeros(
            (nmax, 3, maxbin))  # random Gaussian noise to calibrated flux
        sky_ivar = np.zeros((nmax, 3, maxbin))  # inverse variance of sky
        sky_rand_noise = np.zeros(
            (nmax, 3, maxbin))  # random Gaussian noise to sky only
        frame_rand_noise = np.zeros(
            (nmax, 3, maxbin))  # random Gaussian noise to nobj+nsky
        trueflux[camera.name] = np.empty(
            (args.nspec, nwave))  # calibrated flux
        noisyflux[camera.name] = np.empty(
            (args.nspec, nwave))  # observed flux with noise
        obsivar[camera.name] = np.empty(
            (args.nspec, nwave))  # inverse variance of flux
        if args.simspec:
            dw = np.gradient(simspec.wave[camera.name])
        else:
            sflux = np.empty((args.nspec, npix))

    # - Check if input simspec is for a continuum flat lamp instead of science
    # - This does not convolve to per-fiber resolution
    if args.simspec:
        if simspec.flavor == 'flat':
            log.info("Simulating flat lamp exposure")
            for i, camera in enumerate(qsim.instrument.cameras):
                channel = camera.name
                assert camera.output_wavelength.unit == u.Angstrom
                num_pixels = len(waves[channel])
                dw = np.gradient(simspec.wave[channel])
                meanspec = resample_flux(
                    waves[channel], simspec.wave[channel],
                    np.average(simspec.phot[channel] / dw, axis=0))
                fiberflat = random_state.normal(loc=1.0,
                                                scale=1.0 / np.sqrt(meanspec),
                                                size=(nspec, num_pixels))
                ivar = np.tile(meanspec, [nspec, 1])
                mask = np.zeros((simspec.nspec, num_pixels), dtype=np.uint32)

                for kk in range((args.nspec + args.nstart - 1) //
                                args.n_fibers + 1):
                    camera = channel + str(kk)
                    outfile = lvmspec.io.findfile('fiberflat', night, expid,
                                                  camera)
                    start = max(args.n_fibers * kk, args.nstart)
                    end = min(args.n_fibers * (kk + 1), nmax)

                    if (args.spectrograph <= kk):
                        log.info(
                            "Writing files for channel:{}, spectrograph:{}, spectra:{} to {}"
                            .format(channel, kk, start, end))

                    ff = FiberFlat(waves[channel],
                                   fiberflat[start:end, :],
                                   ivar[start:end, :],
                                   mask[start:end, :],
                                   meanspec,
                                   header=dict(CAMERA=camera))
                    write_fiberflat(outfile, ff)
                    filePath = lvmspec.io.findfile("fiberflat", night, expid,
                                                   camera)
                    log.info("Wrote file {}".format(filePath))

            sys.exit(0)

    # Repeat the simulation for all spectra
    scale = 1e-17
    fluxunits = scale * u.erg / (u.s * u.cm**2 * u.Angstrom)
    for j in range(args.nspec):

        thisobjtype = objtype[j]
        sys.stdout.flush()
        if flavor == 'arc':
            qsim.source.update_in('Quickgen source {0}'.format(j), 'perfect',
                                  wavelengths * u.Angstrom,
                                  spectra * fluxunits)
        else:
            qsim.source.update_in('Quickgen source {0}'.format(j),
                                  thisobjtype.lower(),
                                  wavelengths * u.Angstrom,
                                  spectra[j, :] * fluxunits)
        qsim.source.update_out()

        qsim.simulate()
        qsim.generate_random_noise(random_state)

        for i, output in enumerate(qsim.camera_output):
            assert output['observed_flux'].unit == 1e17 * fluxunits
            # Extract the simulation results needed to create our uncalibrated
            # frame output file.
            num_pixels = len(output)
            nobj[j, i, :num_pixels] = output['num_source_electrons'][:, 0]
            nsky[j, i, :num_pixels] = output['num_sky_electrons'][:, 0]
            nivar[j, i, :num_pixels] = 1.0 / output['variance_electrons'][:, 0]

            # Get results for our flux-calibrated output file.
            cframe_observedflux[
                j, i, :num_pixels] = 1e17 * output['observed_flux'][:, 0]
            cframe_ivar[
                j,
                i, :num_pixels] = 1e-34 * output['flux_inverse_variance'][:, 0]

            # Fill brick arrays from the results.
            camera = output.meta['name']
            trueflux[camera][j][:] = 1e17 * output['observed_flux'][:, 0]
            noisyflux[camera][j][:] = 1e17 * (
                output['observed_flux'][:, 0] +
                output['flux_calibration'][:, 0] *
                output['random_noise_electrons'][:, 0])
            obsivar[camera][j][:] = 1e-34 * output['flux_inverse_variance'][:,
                                                                            0]

            # Use the same noise realization in the cframe and frame, without any
            # additional noise from sky subtraction for now.
            frame_rand_noise[
                j, i, :num_pixels] = output['random_noise_electrons'][:, 0]
            cframe_rand_noise[j, i, :num_pixels] = 1e17 * (
                output['flux_calibration'][:, 0] *
                output['random_noise_electrons'][:, 0])

            # The sky output file represents a model fit to ~40 sky fibers.
            # We reduce the variance by a factor of 25 to account for this and
            # give the sky an independent (Gaussian) noise realization.
            sky_ivar[
                j,
                i, :num_pixels] = 25.0 / (output['variance_electrons'][:, 0] -
                                          output['num_source_electrons'][:, 0])
            sky_rand_noise[j, i, :num_pixels] = random_state.normal(
                scale=1.0 / np.sqrt(sky_ivar[j, i, :num_pixels]),
                size=num_pixels)

    armName = {"b": 0, "r": 1, "z": 2}
    for channel in 'brz':

        # Before writing, convert from counts/bin to counts/A (as in Pixsim output)
        # Quicksim Default:
        # FLUX - input spectrum resampled to this binning; no noise added [1e-17 erg/s/cm2/s/Ang]
        # COUNTS_OBJ - object counts in 0.5 Ang bin
        # COUNTS_SKY - sky counts in 0.5 Ang bin

        num_pixels = len(waves[channel])
        dwave = np.gradient(waves[channel])
        nobj[:, armName[channel], :num_pixels] /= dwave
        frame_rand_noise[:, armName[channel], :num_pixels] /= dwave
        nivar[:, armName[channel], :num_pixels] *= dwave**2
        nsky[:, armName[channel], :num_pixels] /= dwave
        sky_rand_noise[:, armName[channel], :num_pixels] /= dwave
        sky_ivar[:, armName[channel], :num_pixels] *= dwave**2

        # Now write the outputs in DESI standard file system. None of the output files can have more than args.n_fibers spectra

        # Looping over spectrograph
        for ii in range((args.nspec + args.nstart - 1) // args.n_fibers + 1):

            start = max(args.n_fibers * ii,
                        args.nstart)  # first spectrum for a given spectrograph
            end = min(args.n_fibers * (ii + 1),
                      nmax)  # last spectrum for the spectrograph

            if (args.spectrograph <= ii):
                camera = "{}{}".format(channel, ii)
                log.info(
                    "Writing files for channel:{}, spectrograph:{}, spectra:{} to {}"
                    .format(channel, ii, start, end))
                num_pixels = len(waves[channel])

                # Write frame file
                framefileName = lvmspec.io.findfile("frame", night, expid,
                                                    camera)

                frame_flux = nobj[start:end, armName[channel], :num_pixels] + \
                    nsky[start:end, armName[channel], :num_pixels] + \
                    frame_rand_noise[start:end, armName[channel], :num_pixels]
                frame_ivar = nivar[start:end, armName[channel], :num_pixels]

                # required for slicing the resolution matrix, which has shape (nspec, ndiag, nwave)
                # for example if nstart =400, nspec=150: two spectrographs:
                # 400-499=> 0 spectrograph, 500-549 => 1
                sh1 = frame_flux.shape[0]

                if (args.nstart == start):
                    resol = resolution[channel][:sh1, :, :]
                else:
                    resol = resolution[channel][-sh1:, :, :]

                # must create lvmspec.Frame object
                frame = Frame(waves[channel],
                              frame_flux,
                              frame_ivar,
                              resolution_data=resol,
                              spectrograph=ii,
                              fibermap=fibermap[start:end],
                              meta=dict(CAMERA=camera, FLAVOR=simspec.flavor))
                lvmspec.io.write_frame(framefileName, frame)

                framefilePath = lvmspec.io.findfile("frame", night, expid,
                                                    camera)
                log.info("Wrote file {}".format(framefilePath))

                if args.frameonly or simspec.flavor == 'arc':
                    continue

                # Write cframe file
                cframeFileName = lvmspec.io.findfile("cframe", night, expid,
                                                     camera)
                cframeFlux = cframe_observedflux[start:end, armName[channel], :num_pixels] + \
                    cframe_rand_noise[start:end, armName[channel], :num_pixels]
                cframeIvar = cframe_ivar[start:end,
                                         armName[channel], :num_pixels]

                # must create lvmspec.Frame object
                cframe = Frame(waves[channel],
                               cframeFlux,
                               cframeIvar,
                               resolution_data=resol,
                               spectrograph=ii,
                               fibermap=fibermap[start:end],
                               meta=dict(CAMERA=camera, FLAVOR=simspec.flavor))
                lvmspec.io.frame.write_frame(cframeFileName, cframe)

                cframefilePath = lvmspec.io.findfile("cframe", night, expid,
                                                     camera)
                log.info("Wrote file {}".format(cframefilePath))

                # Write sky file
                skyfileName = lvmspec.io.findfile("sky", night, expid, camera)
                skyflux = nsky[start:end, armName[channel], :num_pixels] + \
                    sky_rand_noise[start:end, armName[channel], :num_pixels]
                skyivar = sky_ivar[start:end, armName[channel], :num_pixels]
                skymask = np.zeros(skyflux.shape, dtype=np.uint32)

                # must create lvmspec.Sky object
                skymodel = SkyModel(waves[channel],
                                    skyflux,
                                    skyivar,
                                    skymask,
                                    header=dict(CAMERA=camera))
                lvmspec.io.sky.write_sky(skyfileName, skymodel)

                skyfilePath = lvmspec.io.findfile("sky", night, expid, camera)
                log.info("Wrote file {}".format(skyfilePath))

                # Write calib file
                calibVectorFile = lvmspec.io.findfile("calib", night, expid,
                                                      camera)
                flux = cframe_observedflux[start:end,
                                           armName[channel], :num_pixels]
                phot = nobj[start:end, armName[channel], :num_pixels]
                calibration = np.zeros_like(phot)
                jj = (flux > 0)
                calibration[jj] = phot[jj] / flux[jj]

                # - TODO: what should calibivar be?
                # - For now, model it as the noise of combining ~10 spectra
                calibivar = 10 / cframe_ivar[start:end,
                                             armName[channel], :num_pixels]
                # mask=(1/calibivar>0).astype(int)??
                mask = np.zeros(calibration.shape, dtype=np.uint32)

                # write flux calibration
                fluxcalib = FluxCalib(waves[channel], calibration, calibivar,
                                      mask)
                write_flux_calibration(calibVectorFile, fluxcalib)

                calibfilePath = lvmspec.io.findfile("calib", night, expid,
                                                    camera)
                log.info("Wrote file {}".format(calibfilePath))
Example #47
    def _do_photometry(self, param_tab, n_start=1):
        """
        Helper function which performs the iterations of the photometry process.

        Parameters
        ----------
        param_tab : list
            Names of the columns which represent the initial guesses.
            For example, ['x_0', 'y_0', 'flux_0'], for initial guesses on the
            center positions and the flux.
        n_start : int
            Integer representing the start index of the iteration.
            It is 1 if init_guesses are None, and 2 otherwise.

        Returns
        -------
        output_table : `~astropy.table.Table` or None
            Table with the photometry results, i.e., centroids and
            fluxes estimations and the initial estimates used to start
            the fitting process.
            None is returned if no sources are found in ``image``.
        """

        output_table = Table()
        self._define_fit_param_names()

        for (init_param_name, fit_param_name) in zip(self._pars_to_set.keys(),
                                                     self._pars_to_output.keys()):
            output_table.add_column(Column(name=init_param_name))
            output_table.add_column(Column(name=fit_param_name))

        sources = self.finder(self._residual_image)

        n = n_start
        while(len(sources) > 0 and
              (self.niters is None or n <= self.niters)):
            apertures = CircularAperture((sources['xcentroid'],
                                          sources['ycentroid']),
                                          r=self.aperture_radius)
            sources['aperture_flux'] = aperture_photometry(self._residual_image,
                    apertures)['aperture_sum']

            init_guess_tab = Table(names=['id', 'x_0', 'y_0', 'flux_0'],
                               data=[sources['id'], sources['xcentroid'],
                               sources['ycentroid'],
                               sources['aperture_flux']])

            for param_tab_name, param_name in self._pars_to_set.items():
                if param_tab_name not in (['x_0', 'y_0', 'flux_0']):
                    init_guess_tab.add_column(Column(name=param_tab_name,
                            data=getattr(self.psf_model,
                                         param_name)*np.ones(len(sources))))

            star_groups = self.group_maker(init_guess_tab)
            table, self._residual_image = super(IterativelySubtractedPSFPhotometry,
                    self).nstar(self._residual_image, star_groups)

            star_groups = star_groups.group_by('group_id')
            table = hstack([star_groups, table])

            table['iter_detected'] = n*np.ones(table['x_fit'].shape, dtype=np.int32)

            output_table = vstack([output_table, table])
            sources = self.finder(self._residual_image)
            n += 1

        return output_table
Example #48
def do_sdss(catalog):
    task_str = catalog.get_current_task_str()
    keys = list(catalog.entries.keys())

    Mnames, Mradec = [], []
    for oname in pbar(keys, task_str):
        # Some events may be merged in cleanup process, skip them if
        # non-existent.
        try:
            name = catalog.add_entry(oname)
        except Exception:
            catalog.log.warning(
                '"{}" was not found, suggests merge occurred in cleanup '
                'process.'.format(oname))
            continue

        if (FASTSTARS.RA not in catalog.entries[name]
                or FASTSTARS.DEC not in catalog.entries[name]):
            continue
        else:
            Mnames.append(name)
            Mradec.append(
                str(catalog.entries[name][FASTSTARS.RA][0]['value']) +
                str(catalog.entries[name][FASTSTARS.DEC][0]['value']))

    c = coord(Mradec, unit=(un.hourangle, un.deg), frame='icrs')

    # We must step through the data to query it, >100 is too many
    maxstep = 100  # stepsize
    Nentries = len(Mradec)
    # The stepping round that this star's data was acquired in; needed to
    # connect the obj_id in the query to the name of the star.
    roundindex = np.zeros(0)
    for i in range(int(Nentries / maxstep) + 1):
        result_tmp = SDSS.query_crossid(
            c[maxstep * i:min(maxstep * (i + 1), Nentries)],
            timeout=200.,
            photoobj_fields=[
                'u', 'err_u', 'g', 'err_g', 'r', 'err_r', 'i', 'err_i', 'z',
                'err_z', 'MJD'
            ])
        roundindex = np.concatenate(
            [roundindex, i * np.ones(len(result_tmp['obj_id']))])
        if i == 0:
            result = result_tmp
        else:
            result = vstack([result, result_tmp])

    flagsuccess = result['obj_id']
    listfilter = ['u', 'g', 'r', 'i', 'z']
    for i in range(len(flagsuccess)):
        Mi = int(flagsuccess[i].strip('obj_')) + maxstep * int(roundindex[i])
        name = Mnames[Mi]
        source = catalog.entries[name].add_source(
            bibcode='2015ApJS..219...12A')
        for j in range(5):
            catalog.entries[name].add_photometry(
                time=str(result[i]['MJD']),
                u_time='MJD',
                telescope='SDSS',
                band=listfilter[j],
                magnitude=str(result[i][listfilter[j]]),
                e_magnitude=str(result[i]['err_' + listfilter[j]]),
                source=source)

    catalog.journal_entries()
    return
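The block-wise querying above (at most 100 coordinates per SDSS.query_crossid call, partial results stacked with vstack) is a generic pattern. A minimal sketch, assuming query_func is any callable that returns an astropy Table per chunk:

from astropy.table import vstack

def query_in_chunks(coords, query_func, maxstep=100):
    # Query coords in blocks of maxstep and stack the partial tables.
    pieces = [query_func(coords[i:i + maxstep])
              for i in range(0, len(coords), maxstep)]
    return vstack(pieces)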
Example #49
def get_simspec(simspecfile, log=None, nspec=None):
    ''' Get the simspec object

    The simspec table holds the "truth" spectra and the intrinsic properties
    of each object (redshift, noiseless photometry, [OII] flux, etc.).
    (Input spectra to simulate with pixsim.)

    http://desidatamodel.readthedocs.io/en/latest/DESI_SPECTRO_SIM/PIXPROD/NIGHT/simspec-EXPID.html

    Parameters:
        simspecfile (str):
            The filename of the input simspec file
        log (logging.Logger, optional):
            Logger used for progress messages
        nspec (int, optional):
            Requested number of spectra; capped at the number available

    '''

    minwave = 3533.
    maxwave = 9913.1
    stepwave = 0.2
    scale = 1.e17
    exptime = 5.  # typical BOSS exposure time in s

    if simspecfile:
        if log:
            log.info('Reading input file {}'.format(simspecfile))
        # create SimSpec object
        simspec = lvmsim.io.read_simspec(simspecfile)
        # number of spectra to simulate from SimSpec
        sim_nspec = simspec.nspec

        # get the spectra and wavelengths arrays for different flavors
        if simspec.flavor == 'arc':
            # - TODO: do we need quickgen to support arcs?  For full pipeline
            # - arcs are used to measure PSF but aren't extracted except for
            # - debugging.
            # - TODO: if we do need arcs, this needs to be redone.
            # - conversion from phot to flux doesn't include throughput,
            # - and arc lines are rebinned to nearest 0.2 A.

            # Create full wavelength and flux arrays for arc exposure
            wave_b = np.array(simspec.wave['b'])
            wave_r = np.array(simspec.wave['r'])
            wave_z = np.array(simspec.wave['z'])
            phot_b = np.array(simspec.phot['b'][0])
            phot_r = np.array(simspec.phot['r'][0])
            phot_z = np.array(simspec.phot['z'][0])
            sim_wave = np.concatenate((wave_b, wave_r, wave_z))
            sim_phot = np.concatenate((phot_b, phot_r, phot_z))
            wavelengths = np.arange(minwave, maxwave, stepwave)
            phot = np.zeros(len(wavelengths))
            for i in range(len(sim_wave)):
                wavelength = sim_wave[i]
                flux_index = np.argmin(abs(wavelength - wavelengths))
                phot[flux_index] = sim_phot[i]
            # Convert photons to flux: following specter conversion method
            dw = np.gradient(wavelengths)
            fibarea = const.pi * (1.07e-2 /
                                  2)**2  # cross-sectional fiber area in cm^2
            hc = scale * const.h * const.c  # convert to erg A
            spectra = (hc * exptime * fibarea * dw * phot) / wavelengths
        else:
            wavelengths = simspec.wave['brz']
            spectra = simspec.flux

        # check there's enough spectra to simulate from what we ask for
        if nspec is None or sim_nspec < nspec:
            if log:
                log.info("Only {} spectra in input file".format(sim_nspec))
            nspec = sim_nspec
    else:
        # Initialize the output truth table.
        spectra = []
        wavelengths = qsim.source.wavelength_out.to(u.Angstrom).value
        npix = len(wavelengths)
        truth = dict()
        meta = Table()
        truth['OBJTYPE'] = np.zeros(args.nspec, dtype=(str, 10))
        truth['FLUX'] = np.zeros((args.nspec, npix))
        truth['WAVE'] = wavelengths
        jj = list()

        for thisobj in set(true_objtype):
            ii = np.where(true_objtype == thisobj)[0]
            nobj = len(ii)
            truth['OBJTYPE'][ii] = thisobj
            if log:
                log.info('Generating {} template'.format(thisobj))

            # Generate the templates
            if thisobj == 'ELG':
                elg = lvmsim.templates.ELG(wave=wavelengths,
                                           add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = elg.make_templates(
                    nmodel=nobj,
                    seed=args.seed,
                    zrange=args.zrange_elg,
                    sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'LRG':
                lrg = lvmsim.templates.LRG(wave=wavelengths,
                                           add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = lrg.make_templates(
                    nmodel=nobj,
                    seed=args.seed,
                    zrange=args.zrange_lrg,
                    sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'QSO':
                qso = lvmsim.templates.QSO(wave=wavelengths)
                flux, tmpwave, meta1 = qso.make_templates(
                    nmodel=nobj, seed=args.seed, zrange=args.zrange_qso)
            elif thisobj == 'BGS':
                bgs = lvmsim.templates.BGS(wave=wavelengths,
                                           add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = bgs.make_templates(
                    nmodel=nobj,
                    seed=args.seed,
                    zrange=args.zrange_bgs,
                    rmagrange=args.rmagrange_bgs,
                    sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'STD':
                fstd = lvmsim.templates.FSTD(wave=wavelengths)
                flux, tmpwave, meta1 = fstd.make_templates(nmodel=nobj,
                                                           seed=args.seed)
            elif thisobj == 'QSO_BAD':  # use STAR template no color cuts
                star = lvmsim.templates.STAR(wave=wavelengths)
                flux, tmpwave, meta1 = star.make_templates(nmodel=nobj,
                                                           seed=args.seed)
            elif thisobj == 'MWS_STAR' or thisobj == 'MWS':
                mwsstar = lvmsim.templates.MWS_STAR(wave=wavelengths)
                flux, tmpwave, meta1 = mwsstar.make_templates(nmodel=nobj,
                                                              seed=args.seed)
            elif thisobj == 'WD':
                wd = lvmsim.templates.WD(wave=wavelengths)
                flux, tmpwave, meta1 = wd.make_templates(nmodel=nobj,
                                                         seed=args.seed)
            elif thisobj == 'SKY':
                flux = np.zeros((nobj, npix))
                meta1 = Table(dict(REDSHIFT=np.zeros(nobj, dtype=np.float32)))
            elif thisobj == 'TEST':
                flux = np.zeros((args.nspec, npix))
                indx = np.where(wavelengths > 5800.0 - 1E-6)[0][0]
                ref_integrated_flux = 1E-10
                ref_cst_flux_density = 1E-17
                single_line = (np.arange(args.nspec) % 2 == 0).astype(
                    np.float32)
                continuum = (np.arange(args.nspec) % 2 == 1).astype(np.float32)

                for spec in range(nspec):
                    flux[spec, indx] = single_line[
                        spec] * ref_integrated_flux / np.gradient(wavelengths)[
                            indx]  # single line
                    flux[spec] += continuum[
                        spec] * ref_cst_flux_density  # flat continuum

                meta1 = Table(
                    dict(REDSHIFT=np.zeros(args.nspec, dtype=np.float32),
                         LINE=wavelengths[indx] *
                         np.ones(args.nspec, dtype=np.float32),
                         LINEFLUX=single_line * ref_integrated_flux,
                         CONSTFLUXDENSITY=continuum * ref_cst_flux_density))
            else:
                raise RuntimeError('Unknown object type')

            # Pack it in.
            truth['FLUX'][ii] = flux
            meta = vstack([meta, meta1])
            jj.append(ii.tolist())

            # Sanity check on units; templates currently return ergs, not 1e-17 ergs...
            # assert (thisobj == 'SKY') or (np.max(truth['FLUX']) < 1e-6)

        # Sort the metadata table.
        jj = sum(jj, [])
        meta_new = Table()
        for k in range(nspec):
            index = int(np.where(np.array(jj) == k)[0])
            meta_new = vstack([meta_new, meta[index]])
        meta = meta_new

        # Add TARGETID and the true OBJTYPE to the metadata table.
        meta.add_column(
            Column(true_objtype, dtype=(str, 10), name='TRUE_OBJTYPE'))
        meta.add_column(Column(targetids, name='TARGETID'))

        # Rename REDSHIFT -> TRUEZ anticipating later table joins with zbest.Z
        meta.rename_column('REDSHIFT', 'TRUEZ')

    return spectra, wavelengths, nspec
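The arc branch above drops each simulated line's photon count into the nearest bin of the output wavelength grid. The same nearest-bin assignment can be written without the explicit loop; a toy sketch:

import numpy as np

wavelengths = np.arange(3533., 9913.1, 0.2)  # output grid
sim_wave = np.array([4000.05, 5500.31])      # arc line wavelengths
sim_phot = np.array([120., 85.])             # photons per line

phot = np.zeros(len(wavelengths))
nearest = np.abs(sim_wave[:, None] - wavelengths[None, :]).argmin(axis=1)
phot[nearest] = sim_phot                     # nearest-bin assignment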
Example #50
Bd0 = Bd[Bd['DET'] == 'Y']
Bd0.sort('MJD')
Vd0 = Vd[Vd['DET'] == 'Y']
Vd0.sort('MJD')
Rd0 = Rd[Rd['DET'] == 'Y']
Rd0.sort('MJD')
Id0 = Id[Id['DET'] == 'Y']
Id0.sort('MJD')
gd0 = gd[gd['DET'] == 'Y']
gd0.sort('MJD')
rd0 = rd[rd['DET'] == 'Y']
rd0.sort('MJD')
iid0 = iid[iid['DET'] == 'Y']
iid0.sort('MJD')

vsd = vstack([Bd0['MJD', 'MAG', 'MAGERR'], Vd0['MJD', 'MAG', 'MAGERR'],
              Rd0['MJD', 'MAG', 'MAGERR'], Id0['MJD', 'MAG', 'MAGERR'],
              gd0['MJD', 'MAG', 'MAGERR'], rd0['MJD', 'MAG', 'MAGERR'],
              iid0['MJD', 'MAG', 'MAGERR']])
vsd.write('SN2018kp_snpy.dat', format='ascii.commented_header', overwrite=True)
# edit the file to include filter names

# filter set check
snpy.fset.list_filters()
'''
APO
	SDSS
		'u_s':  sloan u at APO
		'g_s':  sloan g at APO
		'r_s':  sloan r at APO
		'i_s':  sloan i at APO
		'z_s':  sloan z at APO
CTIO
Example #51
File: curve_io.py  Project: arseeq/sntd
    def color_table(self,band1,band2,time_delays=None,magnifications=None,referenceImage='image_1',ignore_images=[]):
        """Takes the multiple images in self.images and combines
            the data into a single color curve using defined
            time delays and magnifications or best (quick) guesses.

        Parameters
        ----------
        band1: str
            The first band for color curve
        band2: str
            The second band for color curve
        time_delays : :class:`dict`
            Dictionary with image names as keys and relative time
            delays as values (e.g. {'image_1':0,'image_2':20})
        magnifications : dict
            Dictionary with image names as keys and relative
            magnifications as values (e.g.
            {'image_1':1,'image_2':1.1})
        referenceImage: str
            The image you want to be the reference (e.g. image_1, image_2, etc.)
        ignore_images: list
            List of images you do not want to include in the color curve.

        Returns
        -------
        self: :class:`sntd.curve_io.curveDict`
        """

        ignore_images=list(ignore_images) if not isinstance(ignore_images,(list,tuple)) else ignore_images
        names=['time','image','zpsys']
        dtype=[self.table.dtype[x] for x in names]
        names=np.append(names,[band1+'-'+band2,band1+'-'+band2+'_err'])
        dtype=np.append(dtype,[dtype[0],dtype[0]])
        self.color.table=Table(names=names,dtype=dtype)
        if not time_delays:
            time_delays=_guess_time_delays(self,referenceImage) #TODO fix these guessing functions
        if not magnifications:
            magnifications=_guess_magnifications(self,referenceImage)
        for im in [x for x in self.images.keys() if x not in ignore_images]:
            temp2=copy(self.images[im].table[self.images[im].table['band']==band2])
            temp1=copy(self.images[im].table[self.images[im].table['band']==band1])
            temp1=temp1[temp1['flux']>0]
            temp2=temp2[temp2['flux']>0]
            temp1['flux']/=magnifications[im]
            temp2['flux']/=magnifications[im]
            temp1['time']-=time_delays[im]
            temp2['time']-=time_delays[im]


            temp2['mag']=-2.5*np.log10(temp2['flux'])+temp2['zp']
            temp2['magerr']=1.0857*temp2['fluxerr']/temp2['flux']
            temp1['mag']=-2.5*np.log10(temp1['flux'])+temp1['zp']
            temp1['magerr']=1.0857*temp1['fluxerr']/temp1['flux']
            temp1_remove=[i for i in range(len(temp1)) if temp1['time'][i] not in temp2['time']]
            temp1.remove_rows(temp1_remove)
            temp2_remove=[i for i in range(len(temp2)) if temp2['time'][i] not in temp1['time']]
            temp2.remove_rows(temp2_remove)

            temp1['magerr']=np.sqrt(temp2['magerr']**2+temp1['magerr']**2)


            temp1['mag']-=temp2['mag']
            temp1.rename_column('mag',band1+'-'+band2)
            temp1.rename_column('magerr',band1+'-'+band2+'_err')
            to_remove=[x for x in temp1.colnames if x not in names]
            temp1.remove_columns(to_remove)


            self.color.table=vstack([self.color.table,copy(temp1)])
        self.color.table.sort('time')
        return(self)
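A hypothetical call of the method above; the curveDict instance and image names are illustrative, not taken from the sntd documentation:

# Hypothetical usage; lensed_sn and the image names are illustrative.
lensed_sn.color_table('B', 'V',
                      time_delays={'image_1': 0, 'image_2': 20},
                      magnifications={'image_1': 1, 'image_2': 1.1},
                      referenceImage='image_1')
print(lensed_sn.color.table)  # stacked, time-sorted B-V color curve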
Example #52
def voronoi_binning(image,
                    obj_name,
                    targetSN=50,
                    largest_bin=5,
                    smallest_bin=0,
                    minimumSN=7,
                    quiet=True,
                    plot=True):
    """
    Function to bin an image using the Voronoi binning method by Cappellari & Copin (2003)
    
    Input as 'image' the target with the filter where S/N is highest
    
    """
    import numpy as np
    import astropy.io.fits as pyfits
    from grizli import utils
    from grizli import prep
    import matplotlib.pyplot as plt
    from astropy.table import vstack
    from skimage.measure import label
    from vorbin.voronoi_2d_binning import voronoi_2d_binning

    im = pyfits.open(image)

    sci = np.asarray(im['SCI'].data, dtype=np.float32)
    sh = sci.shape

    ivar = np.asarray(im['WHT'].data, dtype=np.float32)
    var = 1 / ivar

    orig_mask = (ivar > 0)
    sci[~orig_mask] = 0
    var[~orig_mask] = 0
    orig_var = var * 1

    # Simple background
    bkg = np.median(sci[orig_mask])
    sci -= bkg * orig_mask

    cps = sci * im[0].header['EXPTIME']
    shot_err = np.sqrt(np.maximum(cps, 4)) / im[0].header['EXPTIME']
    var2 = var + shot_err**2
    var = var2

    full_bin_seg = np.zeros(sh, dtype=int) - 1

    yp, xp = np.indices(sci.shape)

    xpf = xp.flatten()
    ypf = yp.flatten()

    # Initialize mask
    mask = orig_mask & True
    bin_min = 1
    full_image = sci * 0.
    full_err = sci * 0.

    idx = np.arange(sci.size, dtype=int)
    full_image = full_image.flatten()
    full_err = full_err.flatten()

    # id, bin, xmin, xmax, ymin, ymax, npix
    full_bin_data = []

    SKIP_LAST = False

    bin_iter = 0
    bin_factor = largest_bin

    NO_NEWLINE = '\x1b[1A\x1b[1M'

    for bin_iter, bin_factor in enumerate(range(largest_bin + 1)[::-1]):
        bin = 2**bin_factor

        if bin_factor < smallest_bin:
            break

        if (bin_factor == 0) & SKIP_LAST:
            continue

        ypb = yp[mask] // bin
        xpb = xp[mask] // bin

        if bin_factor > 0:
            binned_sci = np.zeros((sh[0] // bin + 1, sh[1] // bin + 1))
            binned_npix = binned_sci * 0
            binned_var = binned_sci * 0

            ypi, xpi = np.indices(binned_sci.shape)

            # Only consider unmasked bins
            ij = np.unique(xpb + sh[0] // bin * ypb)
            yarr = ij // (sh[0] // bin)
            xarr = ij - (sh[0] // bin) * yarr

            for xi, yi in zip(xarr, yarr):
                if not quiet:
                    print(NO_NEWLINE + '{0} {1}/{2} {3}/{4}'.format(
                        bin_factor, xi, xarr.max(), yi, yarr.max()))
                slx = slice(xi * bin, xi * bin + bin)
                sly = slice(yi * bin, yi * bin + bin)
                mslice = mask[sly, slx]
                # Straight average
                binned_sci[yi, xi] = sci[sly, slx][mslice].sum()
                binned_npix[yi, xi] = mslice.sum()
                binned_var[yi, xi] = var[sly, slx][mslice].sum()

            binned_err = np.sqrt(binned_var) / binned_npix
            binned_avg = binned_sci / binned_npix

            mask_i = (binned_npix > 0) & (binned_avg / binned_err > minimumSN)
            xpi = xpi[mask_i]
            ypi = ypi[mask_i]
            binned_avg = binned_avg[mask_i]
            binned_err = binned_err[mask_i]
            binned_npix = binned_npix[mask_i]

        else:
            mask_i = mask  # unused at native resolution; avoids a NameError
            xpi = xp[mask]
            ypi = yp[mask]
            binned_avg = sci[mask]
            binned_err = np.sqrt(var)[mask]
            binned_npix = mask[mask] * 1

        if True:

            # Mask pixels in that don't satisfy S/N cutoff as they are
            # unreliable for vorbin
            clip_mask = mask < 0
            for xi, yi in zip(xpi, ypi):
                slx = slice(xi * bin, xi * bin + bin)
                sly = slice(yi * bin, yi * bin + bin)
                clip_mask[sly, slx] = True

            mask &= clip_mask

            # Identify blobs (usually the main central galaxies) and
            # only consider blobs larger than 20% of the largest blob
            if bin_factor == largest_bin:

                label_image = label(mask)
                label_ids = np.unique(label_image)[1:]
                label_sizes = np.array([(label_image == id_i).sum()
                                        for id_i in label_ids])

                keep_ids = label_ids[label_sizes > 0.2 * label_sizes.max()]
                keep_mask = mask < -1
                for i in keep_ids:
                    keep_mask |= label_image == i

                mask &= keep_mask

                # In binned_coords
                in_blob = keep_mask[ypi * bin, xpi * bin]
                msg = 'Drop {0} bins not in main blob'

                print(msg.format((~in_blob).sum()))
                xpi = xpi[in_blob]
                ypi = ypi[in_blob]
                binned_avg = binned_avg[in_blob]
                binned_err = binned_err[in_blob]
                binned_npix = binned_npix[in_blob]

            ypb = yp[mask] // bin
            xpb = xp[mask] // bin

        print('Run voronoi_2d_binning, bin_factor={0}'.format(bin_factor))

        res = voronoi_2d_binning(xpi,
                                 ypi,
                                 binned_avg,
                                 binned_err,
                                 targetSN,
                                 quiet=True,
                                 plot=False,
                                 pixelsize=0.1 * bin,
                                 cvt=True,
                                 wvt=True)

        binNum, xBin, yBin, xBar, yBar, sn, nPixels, scale = res

        # Put Voronoi bins with nPixels > 1 back in the original image
        NBINS = len(nPixels)

        if (bin_factor == smallest_bin) & (smallest_bin > 0):
            valid_bins = nPixels > 0
        else:
            valid_bins = nPixels > 1

        large_bin_ids = np.arange(NBINS)[valid_bins]

        # Put bin in original 2D array and store info
        for b0, bin_id in enumerate(large_bin_ids):
            m_i = binNum == bin_id
            xi_bin = xpi[m_i]
            yi_bin = ypi[m_i]
            for xi, yi in zip(xi_bin, yi_bin):
                slx = slice(xi * bin, xi * bin + bin)
                sly = slice(yi * bin, yi * bin + bin)
                mslice = mask[sly, slx]
                full_bin_seg[sly, slx][mslice] = b0 + bin_min

            # Bin properties
            id_i = b0 + bin_min
            xmin = xi_bin.min() * bin - 1
            xmax = xi_bin.max() * bin + 1
            ymin = yi_bin.min() * bin - 1
            ymax = yi_bin.max() * bin + 1
            npix = m_i.sum() * bin**2
            bin_data_i = [id_i, bin, xmin, xmax, ymin, ymax, npix]
            full_bin_data.append(bin_data_i)

        # Update the mask
        not_in_a_bin = full_bin_seg == -1
        mask &= not_in_a_bin

        bin_min = full_bin_data[-1][0] + 1
        if not quiet:
            print('\n\n\n\n\n bin_factor: {0}, bin_min: {1}\n\n\n\n'.format(
                bin_factor, bin_min))

    ## Bin information
    bin_data = np.array(full_bin_data)
    # bin_data_i = [id_i, bin, xmin, xmax, ymin, ymax, npix]
    tab = utils.GTable()
    for i, c in enumerate(
        ['id', 'bin', 'xmin', 'xmax', 'ymin', 'ymax', 'npix']):
        tab[c] = bin_data[:, i]
        if 'min' in c:
            tab[c] -= tab['bin']
        elif 'max' in c:
            tab[c] += tab['bin']

    # Make a table for the individual pixels
    if mask.sum() > 0:
        single_table = single_pixel_table(mask, start_id=1 + tab['id'].max())
        full_bin_seg[mask] = single_table['id']
        tab = vstack([tab, single_table])

    tab['flux'], tab['err'], tab['area'] = prep.get_seg_iso_flux(
        sci, full_bin_seg, tab, err=np.sqrt(var))

    binned_flux = prep.get_seg_iso_flux(sci,
                                        full_bin_seg,
                                        tab,
                                        fill=tab['flux'] / tab['area'])
    binned_err = prep.get_seg_iso_flux(sci,
                                       full_bin_seg,
                                       tab,
                                       fill=tab['err'] / tab['area'])
    binned_area = prep.get_seg_iso_flux(sci,
                                        full_bin_seg,
                                        tab,
                                        fill=tab['area'])
    binned_bin = prep.get_seg_iso_flux(sci, full_bin_seg, tab, fill=tab['bin'])

    binned_flux[mask] = sci[mask]
    binned_err[mask] = np.sqrt(var)[mask]
    binned_area[mask] = 1
    binned_bin[mask] = 1

    if plot:
        plt.figure(figsize=(12, 12))
        plt.imshow(binned_flux, vmin=-0.5, vmax=2)

    # Save output into image fits file
    primary_extn = pyfits.PrimaryHDU()
    sci_extn = pyfits.ImageHDU(data=binned_flux.astype(np.float32), name='SCI')
    err_extn = pyfits.ImageHDU(data=binned_err.astype(np.float32), name='ERR')
    hdul = pyfits.HDUList([primary_extn, sci_extn, err_extn])
    for ext in [0, 1]:
        for k in im[ext].header:
            if k not in hdul[ext].header:
                if k in ['COMMENT', 'HISTORY', '']:
                    continue
                hdul[ext].header[k] = im[ext].header[k]
    hdul.writeto('binned_{0}_image.fits'.format(obj_name),
                 output_verify='fix',
                 overwrite=True)

    tab.write('binned_{0}_table.fits'.format(obj_name), overwrite=True)
    pyfits.writeto('binned_{0}_seg.fits'.format(obj_name),
                   data=full_bin_seg,
                   overwrite=True)
    pyfits.writeto('binned_{0}_mask.fits'.format(obj_name),
                   data=mask * 1,
                   overwrite=True)

    return tab, full_bin_seg, mask
Example #53
    def generate_full_table(self, gen_small_table=False, load=None):
        """
        Generates the full table from the gaia, tmass, and allwise queries.

        Xmatch Gaia and tmass
        Xmatch Gaia with allwise
        Xmatch tmass with allwise
        Xmatch (Gaia x tmass) with allwise

        We then append:
        sources in all three catalogs
        sources in two of the catalogs
        sources in just one catalog

        Arguments:
            gen_small_table [bool]: Whether to only use a subset of available columns. False by default.
            load [string]: Directory from which to load an already generated full_table. None by default.
        """

        out("\nGenerating full table...")

        out("Extracting specified data columns...")

        if gen_small_table:

            # Gaia columns in small_table
            gaia_cat_table = self.gaia.table["designation", "ra", "dec",
                                             "ra_error", "dec_error",
                                             "parallax", "parallax_error",
                                             "pmra", "pmra_error", "pmdec",
                                             "pmdec_error", "phot_g_mean_mag",
                                             "bp_rp", "bp_g", "g_rp",
                                             "phot_g_n_obs",
                                             "phot_g_mean_flux_over_error",
                                             "phot_g_mean_flux_error",
                                             "phot_g_mean_flux",
                                             "phot_bp_mean_mag",
                                             "phot_rp_mean_mag"]
            gaia_cat = CatalogTable(["gaia"], gaia_cat_table)

            # 2Mass columns in small_table
            tmass_cat_table = self.tmass.table["designation", "ra", "dec",
                                               "err_maj", "err_min", "j_m",
                                               "h_m", "k_m", "j_cmsig",
                                               "h_cmsig", "k_cmsig", "k_snr"]
            tmass_cat = CatalogTable(["2mass"], tmass_cat_table)

            # Allwise columns in small_table
            allwise_cat_table = self.allwise.table["designation", "ra", "dec",
                                                   "sigra", "sigdec", "w1mpro",
                                                   "w2mpro", "w3mpro",
                                                   "w4mpro", "w1sigmpro",
                                                   "w2sigmpro", "w3sigmpro",
                                                   "w4sigmpro", "w4snr"]
            allwise_cat = CatalogTable(["allwise"], allwise_cat_table)

        else:

            gaia_cat = self.gaia

            tmass_cat = self.tmass

            allwise_cat = self.allwise

        # rename columns to avoid name collisions later
        out("Renaming columns...")

        gaia_cat.table.rename_column("designation", "gaia_designation")
        gaia_cat.table.rename_column("ra", "gaia_ra")
        gaia_cat.table.rename_column("dec", "gaia_dec")

        tmass_cat.table.rename_column("designation", "2mass_designation")
        tmass_cat.table.rename_column("ra", "2mass_ra")
        tmass_cat.table.rename_column("dec", "2mass_dec")

        allwise_cat.table.rename_column("designation", "allwise_designation")
        allwise_cat.table.rename_column("ra", "allwise_ra")
        allwise_cat.table.rename_column("dec", "allwise_dec")

        out("Crossmatching Gaia with 2MASS...")
        gaia_X_tmass = self.merge_tables(gaia_cat, tmass_cat)

        out("Crossmatching Gaia with AllWISE...")
        gaia_X_allwise = self.merge_tables(gaia_cat, allwise_cat)

        out("Crossmatching 2MASS with AllWISE...")
        tmass_X_allwise = self.merge_tables(tmass_cat, allwise_cat)

        out("Crossmatching all three catalogs...")
        gaia_X_tmass_X_allwise = self.merge_tables(gaia_X_tmass, allwise_cat)

        info_out(
            str(len(gaia_X_tmass_X_allwise.table['gaia_designation'])) +
            " sources in all three catalogs.")

        full_table = gaia_X_tmass_X_allwise

        out("Adding objects that do not appear in all catalogs...")

        # sources in gaia_X_tmass that are not in gaia_X_tmass_X_allwise
        # i.e. sources in Gaia and Tmass but not Allwise
        diff1 = self.table_difference(gaia_X_tmass, full_table,
                                      "gaia_designation", ["allwise"],
                                      gaia_cat, tmass_cat, allwise_cat)
        info_out(
            str(len(diff1['gaia_designation'])) +
            " sources in Gaia and 2MASS but not AllWISE.")
        full_table.table = vstack([full_table.table, diff1])

        # sources in gaia_X_allwise that are not in gaia_X_tmass_X_allwise
        # i.e. sources in Gaia and Allwise but not Tmass.
        diff2 = self.table_difference(gaia_X_allwise, full_table,
                                      "gaia_designation", ["2mass"], gaia_cat,
                                      tmass_cat, allwise_cat)
        info_out(
            str(len(diff2['gaia_designation'])) +
            " sources in Gaia and AllWISE but not 2MASS.")
        full_table.table = vstack([full_table.table, diff2])

        # objects in tmass_X_allwise that are not in gaia_X_tmass_X_allwise,
        # i.e. sources in tmass and allwise but not gaia.
        diff3 = self.table_difference(tmass_X_allwise, full_table,
                                      "allwise_designation", ["gaia"],
                                      gaia_cat, tmass_cat, allwise_cat)
        info_out(
            str(len(diff3['allwise_designation'])) +
            " sources in 2MASS and AllWISE but not Gaia.")
        full_table.table = vstack([full_table.table, diff3])

        # objects in gaia but not yet in full_table
        diff4 = self.table_difference(gaia_cat, full_table, "gaia_designation",
                                      ["2mass", "allwise"], gaia_cat,
                                      tmass_cat, allwise_cat)
        info_out(
            str(len(diff4['gaia_designation'])) + " sources in Gaia only.")
        full_table.table = vstack([full_table.table, diff4])

        # objects in tmass but not yet in full_table
        diff5 = self.table_difference(tmass_cat, full_table,
                                      "2mass_designation", ["gaia", "allwise"],
                                      gaia_cat, tmass_cat, allwise_cat)
        info_out(
            str(len(diff5['2mass_designation'])) + " sources in 2MASS only.")
        full_table.table = vstack([full_table.table, diff5])

        # objects in allwise but not yet in full_table
        diff6 = self.table_difference(allwise_cat, full_table,
                                      "allwise_designation", ["gaia", "2mass"],
                                      gaia_cat, tmass_cat, allwise_cat)
        info_out(
            str(len(diff6['allwise_designation'])) +
            " sources in AllWISE only.")
        full_table.table = vstack([full_table.table, diff6])

        out("Computing user-defined columns...")

        out("Variability...")
        v = np.sqrt(full_table.table['phot_g_n_obs'] /
                    full_table.table['phot_g_mean_flux_over_error'])
        v.name = "variability"
        v.unit = "(n_obs / mag)^0.5"
        full_table.table.add_column(v)

        out("Radial distance...")
        d = 1000 / full_table.table['parallax']
        d.name = "radial_distance"
        d.unit = "pc"
        full_table.table.add_column(d)

        info_out("Full table generated.")

        fname = tpath + "/full_table.dat"
        full_table.table.write(fname, format='ascii.ecsv')

        return (full_table)
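Each append step above is set subtraction on a designation column followed by a vstack. Stripped of the catalog bookkeeping, the pattern reduces to a sketch like this (append_missing is a simplified stand-in for the table_difference-then-vstack sequence):

import numpy as np
from astropy.table import vstack

def append_missing(full, extra, key):
    # Keep only rows of extra whose key value is not already in full.
    missing = ~np.isin(extra[key], full[key])
    return vstack([full, extra[missing]])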
Example #54
def phot(images,
         positions=None,
         aperture_radius=6.0,
         annulus=8.0,
         dannulus=10.0,
         method='exact',
         bg_method='median',
         bg_box_size=50,
         gain=1.1,
         output=None):
    """
    Basic circular aperture photometry of given images.

    Parameters
    ----------
    images : generator or list of 'ccdproc.CCDData'
        Images to be combined.

    positions : list
        The positions should be either a single tuple of (x, y), a list of (x, y) tuples, or
        an array with shape Nx2, where N is the number of positions.

        Default is 'None'.


    aperture_radius : list or float
        The aperture radius of a source.

        Default is '6.0'.

    annulus : float
        The circular inner annulus aperture radius of a source.

        Default is '8.0'.

    dannulus : float
        The circular outer annulus aperture radius of a source.

        Default is '10.0'.

    method : {'exact', 'center', 'subpixel'}, optional
        The method used to determine the overlap of the aperture on the
        pixel grid.  Not all options are available for all aperture
        types.  Note that the more precise methods are generally slower.
        The following methods are available:

            * ``'exact'`` (default):
                The exact fractional overlap of the aperture and
                each pixel is calculated.  The returned mask will
                contain values between 0 and 1.

            * ``'center'``:
                A pixel is considered to be entirely in or out of the
                aperture depending on whether its center is in or out of
                the aperture.  The returned mask will contain values
                only of 0 (out) and 1 (in).

            * ``'subpixel'``:
                A pixel is divided into subpixels (see the ``subpixels``
                keyword), each of which are considered to be entirely in
                or out of the aperture depending on whether its center
                is in or out of the aperture.  If ``subpixels=1``, this
                method is equivalent to ``'center'``.  The returned mask
                will contain values between 0 and 1.

    bg_method: {'mean', 'median'}, optional
        The statistic used to calculate the background.
        All measurements are sigma clipped.

    bg_box_size : int
        Selecting the box size requires some care by the user. The box size
        should generally be larger than the typical size of sources in the
        image, but small enough to encapsulate any background variations. For
        best results, the box size should also be chosen so that the data are
        covered by an integer number of boxes in both dimensions. More information:
        https://github.com/Gabriel-p/photpy/blob/master/IRAF_compare/aperphot.py

    gain: float
        CCD gain value.

    output : None or str
        If it is None, function returns just Table.
        If it is 'str', function creates photometry result file.


    Returns
    -------
    'A table object or file'

    Examples
    --------
    >>> from tuglib.io import FitsCollection
    >>> from tuglib.analysis import phot
    >>> location = '/Users/ykilic/Desktop/data/'
    >>> images = FitsCollection(location, file_extension="fit")
    >>> fits_images = images.ccds(IMAGETYP='object')
    >>> phot(fits_images, positions=[(1400.65, 1545.78)], aperture_radius=10.0, annulus=12.0, dannulus=15)
    """

    if not isinstance(images, (list, types.GeneratorType)):
        raise TypeError(
            "'images' should be a list or generator of 'ccdproc.CCDData' objects.")

    if positions is not None:
        if not isinstance(positions, (list, type(None))):
            raise TypeError("'positions' should be 'list' or 'None' object.")

    if not isinstance(aperture_radius, (list, float, int)):
        raise TypeError("'radius' should be a 'list' or 'float' object.")

    if not isinstance(annulus, (float, int)):
        raise TypeError("'annulus' should be a 'int' or 'float' object.")

    if not isinstance(dannulus, (float, int)):
        raise TypeError("'dannulus' should be a 'int' or 'float' object.")

    if not isinstance(method, str):
        raise TypeError("'method' should be a 'str' object.")

    if not isinstance(bg_method, str):
        raise TypeError("'bg_method' should be a 'str' object.")

    if not isinstance(bg_box_size, int):
        raise TypeError("'bg_box_size' should be a 'int' or 'float' object.")

    if not isinstance(gain, (float, int)):
        raise TypeError("'gain' should be a 'float' object.")

    if not isinstance(output, (type(None), str)):
        raise TypeError("'output' should be 'None' or 'str' object.")

    if isinstance(images, types.GeneratorType):
        try:
            ccds = list(images)
        except IndexError:
            return None
    else:
        ccds = images

    ccds_phot_list = list()

    for ccd in ccds:
        exptime = ccd.meta['EXPTIME']

        box_xy = (bg_box_size, bg_box_size)
        sigma_clip = SigmaClip(sigma=3.)
        bkg_estimator = MedianBackground()
        bkg = Background2D(ccd,
                           box_xy,
                           filter_size=(3, 3),
                           sigma_clip=sigma_clip,
                           bkg_estimator=bkg_estimator)

        ccd.uncertainty = StdDevUncertainty(
            calc_total_error(ccd.data, bkg.background, gain))

        aperture = CircularAperture(positions, r=aperture_radius)
        annulus_aperture = CircularAnnulus(positions,
                                           r_in=annulus,
                                           r_out=dannulus)
        annulus_masks = annulus_aperture.to_mask(method=method)

        bkg_phot = []
        for mask in annulus_masks:
            annulus_data = mask.multiply(ccd)
            annulus_data_1d = annulus_data[mask.data > 0]
            mean_sigclip, median_sigclip, stddev_sigclip = sigma_clipped_stats(
                annulus_data_1d)

            if bg_method == "mean":
                sigclip = mean_sigclip
            elif bg_method == "median":
                sigclip = median_sigclip
            else:
                sigclip = median_sigclip

            bkg_phot.append(sigclip)

        bkg_phot = np.array(bkg_phot)
        phot_table = aperture_photometry(ccd, aperture)
        phot_table['annulus_{}'.format(bg_method)] = bkg_phot * u.adu
        phot_table['aper_bkg'] = bkg_phot * aperture.area * u.adu
        phot_table['residual_aperture_sum'] = phot_table[
            'aperture_sum'] - phot_table['aper_bkg']

        phot_table['flux'] = phot_table['residual_aperture_sum'] / float(
            exptime)
        phot_table['flux'].info.format = '%.3f'
        phot_table['JD'] = float(ccd.meta['JD'])
        phot_table['JD'].info.format = '%.8f'
        phot_table['JD'].unit = 'd'
        phot_table['aperture_sum'].info.format = '%.3f'
        phot_table['aperture_sum_err'].info.format = '%.3f'
        phot_table['annulus_{}'.format(bg_method)].info.format = '%.3f'
        phot_table['aper_bkg'].info.format = '%.3f'
        phot_table['residual_aperture_sum'].info.format = '%.3f'
        phot_table = flux2mag(phot_table)

        phot_table['mag_err'] = 1.0857 * (
            phot_table['aperture_sum_err'] /
            phot_table['residual_aperture_sum']) * u.mag
        phot_table['mag_err'].info.format = '%.3f'
        ccds_phot_list.append(phot_table)

    return vstack(ccds_phot_list)
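The 1.0857 factor used for mag_err is 2.5/ln(10): propagating m = -2.5*log10(F) gives sigma_m = (2.5/ln 10) * sigma_F/F. A quick numerical check:

import numpy as np

print(2.5 / np.log(10))  # 1.0857362047581294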
Example #55
import os
import glob

import numpy as np
from astropy.table import Table, vstack
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)

t_all = []

for filename in glob.glob(os.path.join('tables', '*.fits')):

    print(filename)
    t = Table.read(filename)
    sub = t[~np.isnan(t['altitude'])]['timestamp', 'callsign', 'longitude',
                                      'latitude', 'altitude']
    if len(sub) > 0:
        t_all.append(sub)

t_all = vstack(t_all)
t_all.write('summary.fits', overwrite=True)
Example #56
            star1 = stmass_pc2(tab1['mass_ssp_sm'],
                               dist=dist.loc[gal]['caDistP3d'],
                               name='sigstar_sm')
            avstar0 = stmass_pc2(tab0['mass_Avcor_ssp_rg'],
                                 dist=dist.loc[gal]['caDistP3d'],
                                 name='sigstar_Avcor_rg')
            avstar1 = stmass_pc2(tab1['mass_Avcor_ssp_sm'],
                                 dist=dist.loc[gal]['caDistP3d'],
                                 name='sigstar_Avcor_sm')
            tab0.add_column(star0)
            tab1.add_column(star1)
            tab0.add_column(avstar0)
            tab1.add_column(avstar1)

    if len(rglist) > 0:
        rg_merge = vstack(rglist)
        rg_merge.meta['date'] = datetime.today().strftime('%Y-%m-%d')
        print(rg_merge.colnames)
        print('There are', len(rg_merge), 'rows in native table')

    if len(smlist) > 0:
        sm_merge = vstack(smlist)
        sm_merge.meta['date'] = datetime.today().strftime('%Y-%m-%d')
        print(sm_merge.colnames)
        print('There are', len(sm_merge), 'rows in smoothed table')

    if (len(filelist) > 1):
        outname = 'edge'
    else:
        outname = gal
    if prod == prodtype[0]:
Example #57
magl_lim = lambda mag, mag_lim: (mag < mag_lim)
selection = lambda t, mag_lim: area_ero(t['DEC'], t['g_lat'], t['g_lon']) & magl_lim(t['MAG'], mag_lim)
nl = lambda sel: len(sel.nonzero()[0])
#t = t1
s1 = selection(t1, 24.5)
print(nl(s1))
s2 = selection(t2, 22.0)
print(nl(s2))
s3 = selection(t3, 20.5)
print(nl(s3))
path_2_output = os.path.join(root_dir, 'S5_4MOST_RFIB215.fit')

t1_b = Table(t1[s1])
t2_b = Table(t2[s2])
t3_b = Table(t3[s3])

drop_cols = ['ra', 'dec', 'g_lat', 'g_lon', 'ecl_lat', 'ecl_lon',
             'redshift_R', 'redshift_S', 'dL_cm', 'galactic_NH',
             'galactic_ebv', 'galaxy_stellar_mass',
             'galaxy_star_formation_rate', 'galaxy_LX_hard', 'is_quiescent',
             'HALO_M200c', 'HALO_M500c', 'HALO_Mvir', 'HALO_Acc_Rate_1Tdyn',
             'HALO_rs', 'HALO_rvir', 'HALO_vmax', 'Vrms', 'HALO_id',
             'HALO_host_id', 'angular_distance_to_cluster_center_in_rvir',
             'comoving_distance_to_cluster_in_rvir',
             'redshift_R_distance_to_cluster', 'redshift_S_distance_to_cluster',
             'galaxy_gr', 'galaxy_ri', 'galaxy_iz', 'richness',
             'galaxy_mag_abs_r', 'galaxy_mag_r', 'sdss_g', 'sdss_r', 'sdss_i',
             'sdss_z', 'sdss_g_err', 'sdss_r_err', 'sdss_i_err', 'sdss_z_err']
t1_b.remove_columns(drop_cols)
t2_b.remove_columns(drop_cols)
t3_b.remove_columns(['Z', 'Mstar', 'SFR', 'EBV', 'K_mag_abs', 'rtot', 'rfib',
                     'ug', 'gr', 'ri', 'iz', 'zy', 'yj', 'jh', 'hks',
                     'g_lat', 'g_lon'])
t3_c=Table(t3_b)
t3_c.remove_columns('NAME')
t3_c.add_column(Column(name='NAME', data=t3_b['NAME']), index=0)

test = vstack((t1_b, t2_b, t3_c))

test['MAG_TYPE'][:] = "SDSS_r_AB"

test.write(path_2_output, overwrite=True)


Example #58
def efosc2_pol_phot(folder_path, apermul, fwhm):
    """ Script runs photometry for EFOSC2 polarimetry images """

    # Read in four wave plate angle files and set up arrays for later
    file_ang0 = os.path.join(folder_path, '0ang.fits')
    file_ang225 = os.path.join(folder_path, '225ang.fits')
    file_ang45 = os.path.join(folder_path, '45ang.fits')
    file_ang675 = os.path.join(folder_path, '675ang.fits')

    files = [file_ang0, file_ang225, file_ang45, file_ang675]
    angle = ['0', '225', '45', '675']
    ang_dec = ['0', '22.5', '45', '67.5']
    label = [
        r'$0^{\circ}$ image', r'$22.5^{\circ}$ image', r'$45^{\circ}$ image',
        r'$67.5^{\circ}$ image'
    ]

    # Set up array to store the number of sources per half-wave plate
    numsource = []

    # Loop over files for the four wave plate files
    for k in range(0, len(angle), 1):

        # Open fits file, extract pixel flux data and remove saturated pixels
        try:
            hdulist = fits.open(files[k])
            image_data = hdulist[0].data

        except FileNotFoundError:
            raise FileNotFoundError("Cannot find the input fits file(s).")

        # Remove bad pixels and mask edges
        image_data[image_data > 60000] = 0
        image_data[image_data < 0] = 0
        rows = len(image_data[:, 0])
        cols = len(image_data[0, :])
        hdulist.close()

        # Calculate estimate of background using sigma-clipping and calculate
        # number of pixels used in the background region that were not
        # clipped! This is done in a small area near the optical axis.
        go_bmean, go_bmedian, go_bstd = sigma_clipped_stats(
            image_data[510:568, 520:580], sigma=3.0, maxiters=5)
        ge_bmean, ge_bmedian, ge_bstd = sigma_clipped_stats(
            image_data[446:504, 520:580], sigma=3.0, maxiters=5)
        mask_o = sigma_clip(image_data[510:568, 520:580],
                            sigma=3.0,
                            maxiters=5,
                            masked=True)
        mask_e = sigma_clip(image_data[446:504, 520:580],
                            sigma=3.0,
                            maxiters=5,
                            masked=True)
        ann_area_o = np.ma.MaskedArray.count(mask_o)
        ann_area_e = np.ma.MaskedArray.count(mask_e)

        # Detect sources using DAO star finder
        daofind_o = DAOStarFinder(fwhm=5,
                                  threshold=5 * go_bstd,
                                  exclude_border=True)
        daofind_e = DAOStarFinder(fwhm=5,
                                  threshold=5 * ge_bstd,
                                  exclude_border=True)
        sources_o = daofind_o(image_data[522:552, 535:565])
        sources_e = daofind_e(image_data[462:492, 535:565])

        if (sources_o is None or sources_e is None):
            raise ValueError("No source detected in image")

        if len(sources_o) != len(sources_e):
            raise ValueError("Unequal number of sources in o and e images!")

        glob_bgm = [go_bmean, ge_bmean]
        glob_bgerr = [go_bstd, ge_bstd]

        # Convert the source centroids back into detector pixels
        sources_o['xcentroid'] = sources_o['xcentroid'] + 535
        sources_o['ycentroid'] = sources_o['ycentroid'] + 522
        sources_e['xcentroid'] = sources_e['xcentroid'] + 535
        sources_e['ycentroid'] = sources_e['ycentroid'] + 462

        # Estimate the FWHM of the source by simulating a 2D Gaussian
        # This is only done on the 0 angle image ensuring aperture sizes
        # are equal for all half-wave plate angles. If a user specified
        # FWHM is given, then the estimation is not used.
        if fwhm == 0.0:
            xpeaks_o = []
            xpeaks_e = []
            ypeaks_o = []
            ypeaks_e = []
            fwhm = []

            for i in range(0, len(sources_o), 1):
                data_o = image_data[525:550, 535:565]
                xpeaks_o.append(int(sources_o[i]['xcentroid']) - 535)
                ypeaks_o.append(int(sources_o[i]['ycentroid']) - 525)

                data_e = image_data[465:490, 535:560]
                xpeaks_e.append(int(sources_e[i]['xcentroid']) - 535)
                ypeaks_e.append(int(sources_e[i]['ycentroid']) - 465)

                min_count_o = np.min(data_o)
                min_count_e = np.min(data_e)
                max_count_o = data_o[ypeaks_o[i], xpeaks_o[i]]
                max_count_e = data_e[ypeaks_e[i], xpeaks_e[i]]
                half_max_o = (max_count_o + min_count_o) / 2
                half_max_e = (max_count_e + min_count_e) / 2

                # Crude calculation for each source
                nearest_above_x_o = ((
                    np.abs(data_o[ypeaks_o[i], xpeaks_o[i]:-1] -
                           half_max_o)).argmin())
                nearest_below_x_o = ((
                    np.abs(data_o[ypeaks_o[i], 0:xpeaks_o[i]] -
                           half_max_o)).argmin())
                nearest_above_x_e = ((
                    np.abs(data_e[ypeaks_e[i], xpeaks_e[i]:-1] -
                           half_max_e)).argmin())
                nearest_below_x_e = ((
                    np.abs(data_e[ypeaks_e[i], 0:xpeaks_e[i]] -
                           half_max_e)).argmin())
                nearest_above_y_o = ((
                    np.abs(data_o[ypeaks_o[i]:-1, xpeaks_o[i]] -
                           half_max_o)).argmin())
                nearest_below_y_o = ((
                    np.abs(data_o[0:ypeaks_o[i], xpeaks_o[i]] -
                           half_max_o)).argmin())
                nearest_above_y_e = ((
                    np.abs(data_e[ypeaks_e[i]:-1, xpeaks_e[i]] -
                           half_max_e)).argmin())
                nearest_below_y_e = ((
                    np.abs(data_e[0:ypeaks_e[i], xpeaks_e[i]] -
                           half_max_e)).argmin())
                fwhm.append(
                    (nearest_above_x_o + (xpeaks_o[i] - nearest_below_x_o)))
                fwhm.append(
                    (nearest_above_y_o + (ypeaks_o[i] - nearest_below_y_o)))
                fwhm.append(
                    (nearest_above_x_e + (xpeaks_e[i] - nearest_below_x_e)))
                fwhm.append(
                    (nearest_above_y_e + (ypeaks_e[i] - nearest_below_y_e)))

            fwhm = np.mean(fwhm)

        # Stack both ord and exord sources together
        tot_sources = vstack([sources_o, sources_e])

        # Store the ordinary and extraordinary beam source images and
        # create apertures for aperture photometry
        positions = np.swapaxes(
            np.array((tot_sources['xcentroid'], tot_sources['ycentroid']),
                     dtype='float'), 0, 1)
        aperture = CircularAperture(positions, r=0.5 * apermul * fwhm)
        phot_table = aperture_photometry(image_data, aperture)

        # Set up arrays of ord and exord source parameters
        s_id = np.zeros([len(np.array(phot_table['id']))])
        xp = np.zeros([len(s_id)])
        yp = np.zeros([len(s_id)])
        fluxbgs = np.zeros([len(s_id)])
        mean_bg = np.zeros([len(s_id)])
        bg_err = np.zeros([len(s_id)])
        s_area = []

        for i in range(0, len(np.array(phot_table['id'])), 1):
            s_id[i] = np.array(phot_table['id'][i])
            xpos = np.array(phot_table['xcenter'][i])
            ypos = np.array(phot_table['ycenter'][i])
            xp[i] = xpos
            yp[i] = ypos
            s_area.append(np.pi * (0.5 * apermul * fwhm)**2)
            j = i % 2
            fluxbgs[i] = (phot_table['aperture_sum'][i] -
                          aperture.area * glob_bgm[j])
            mean_bg[i] = glob_bgm[j]
            bg_err[i] = glob_bgerr[j]

        # Create and save the image in z scale and overplot the ordinary and
        # extraordinary apertures and local background annuli if applicable
        fig = plt.figure()
        zscale = ZScaleInterval(image_data)
        norm = ImageNormalize(stretch=SqrtStretch(), interval=zscale)
        image = plt.imshow(image_data, cmap='gray', origin='lower', norm=norm)
        bg_annulus_o = RectangularAnnulus((550, 539),
                                          w_in=0.1,
                                          w_out=60,
                                          h_out=58,
                                          theta=0)
        bg_annulus_e = RectangularAnnulus((550, 475),
                                          w_in=0.1,
                                          w_out=60,
                                          h_out=58,
                                          theta=0)
        bg_annulus_o.plot(color='skyblue', lw=1.5, alpha=0.5)
        bg_annulus_e.plot(color='lightgreen', lw=1.5, alpha=0.5)

        for i in range(0, len(np.array(phot_table['id'])), 1):
            aperture = CircularAperture((xp[i], yp[i]), r=0.5 * apermul * fwhm)

            if i < int(len(np.array(phot_table['id'])) / 2):
                aperture.plot(color='blue', lw=1.5, alpha=0.5)

            else:
                aperture.plot(color='green', lw=1.5, alpha=0.5)

        plt.xlim(500, 600)
        plt.ylim(425, 575)
        plt.title(label[k])
        image_fn = folder_path + angle[k] + '_image.png'
        fig.savefig(image_fn)

        # Create dataframes for photometry results
        cols = [
            'xpix', 'ypix', 'fluxbgs', 'sourcearea', 'meanbg', 'bgerr',
            'bgarea'
        ]
        rows_o = []
        rows_e = []

        for i in range(0, len(np.array(phot_table['id'])), 1):
            row = {
                cols[0]: xp[i],
                cols[1]: yp[i],
                cols[2]: fluxbgs[i],
                cols[3]: s_area[i],
                cols[4]: mean_bg[i],
                cols[5]: bg_err[i]
            }
            if 0 <= i < int(len(np.array(phot_table['id'])) / 2):
                row[cols[6]] = ann_area_o
                rows_o.append(row)
            else:
                row[cols[6]] = ann_area_e
                rows_e.append(row)

        # Build each dataframe in one call; DataFrame.append was removed in
        # pandas 2.0
        df_o = pd.DataFrame(rows_o, columns=cols)
        df_e = pd.DataFrame(rows_e, columns=cols)

        # Save dataframes to text files
        df_o.to_string(folder_path + 'angle' + angle[k] + '_ord.txt',
                       index=False,
                       justify='left')

        df_e.to_string(folder_path + 'angle' + angle[k] + '_exord.txt',
                       index=False,
                       justify='left')

        # Save the number of sources in each beam to a list
        numsource.append(int(len(np.array(phot_table['id'])) / 2))

    # Print number of sources per half-wave plate image and FWHM
    print("FWHM =", fwhm, "pixels")
    for i in range(0, len(numsource), 1):
        print("No of sources detected at", ang_dec[i], "degrees:",
              numsource[i])

    return 0
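The crude FWHM estimate above finds, along each image axis, the samples closest to the half-maximum on either side of the peak and sums the two offsets. A one-dimensional version of the same idea:

import numpy as np

def crude_fwhm_1d(profile, peak):
    # Offsets of the samples nearest half-maximum on each side of peak.
    half_max = (profile[peak] + profile.min()) / 2
    above = np.abs(profile[peak:] - half_max).argmin()  # right of the peak
    below = np.abs(profile[:peak] - half_max).argmin()  # left of the peak
    return above + (peak - below)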
Example #59
File: addVIzinfo.py  Project: rstaten/LSS
    for it in range(0,len(tilet)):
        date = str(datet[it])
        tile = str(tilet[it])
        tt=Table.read(dirvi+tp[:3]+'/'+'desi-vi_'+tp[:3]+'_tile'+tile+'_nightdeep_merged_all_'+date+'.csv',format='pandas.csv')
        tt.keep_columns(['TARGETID','best_z','best_quality','best_spectype','all_VI_issues','all_VI_comments','merger_comment','N_VI'])
        tz = Table.read(dirz+'/'+tp+'/'+tile+'_'+tp+'zinfo.fits')
        tj = join(tz,tt,join_type='left',keys='TARGETID')
        tj['N_VI'].fill_value = 0
        tj['N_VI'] = tj['N_VI'].filled() #should easily be able to select rows with N_VI > 0 to get desired info
        tj.write(dirz+'/'+tp+'/'+tile+'_'+tp+'zinfo_wVI.fits',format='fits',overwrite=True)
        print('wrote file with VI info to '+dirz+'/'+tp+'/'+tile+'_'+tp+'zinfo_wVI.fits')
    if len(tilet) > 1:
        dt = Table.read(dirz+'/'+tp+'/'+str(tilet[0])+'_'+tp+'zinfo_wVI.fits')
        for it in range(1,len(tilet)):
            dtn = Table.read(dirz+'/'+tp+'/'+str(tilet[it])+'_'+tp+'zinfo_wVI.fits')
            dt = vstack([dt,dtn])

    
        cols = ['z','zwarn','chi2','deltachi2','spectype','subtype']
        for i in range(1,5):
            
            dt['z_'+str(i)]=np.zeros(len(dt))
            dt['zwarn_'+str(i)]=np.zeros(len(dt))
            dt['chi2_'+str(i)]=np.zeros(len(dt))
            dt['deltachi2_'+str(i)]=np.zeros(len(dt))
            dt['spectype_'+str(i)] = 'GALAXY'
            dt['subtype_'+str(i)] = 'GALAXY'
        for ii in range(0,len(dt)):
            ln = dt[ii]
            zfits = zinfo.get_zfits(ln['TILEID'],ln['PETAL_LOC'],ln['subset'],ln['TARGETID'],release)
            for jj in range(1,5):
Example #60
def vstack(tables, **kwargs):
    """
    Wraps astropy.table.vstack and returns a LineList
    """
    return LineList(table.vstack(tables, **kwargs))
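A minimal usage sketch, assuming from astropy import table and a LineList class that wraps a Table (both are implied by the wrapper but not shown in this snippet):

# Hypothetical usage of the wrapper above.
from astropy.table import Table

t1 = Table({'wrest': [1215.67], 'name': ['HI 1215']})
t2 = Table({'wrest': [1025.72], 'name': ['HI 1025']})

lines = vstack([t1, t2])  # -> LineList containing both rows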