def combine_karachantsev_tables(write_to_disk=False):

    dname = pth.join(SUP_DATA_DIR, "karachantsev_sfr_catalogue/")
    sfr = ascii.read(dname+"table3.dat", readme=dname+"ReadMe.txt")
    dname = pth.join(SUP_DATA_DIR, "karachantsev_list_of_galaxies/")
    phot = ascii.read(dname+"table2.dat", readme=dname+"readme.txt")
    dist = ascii.read(dname+"table6.dat", readme=dname+"readme.txt")

    wanted_col_names = ["Name", "RAh", "RAm", "RAs", "DE-", "DEd", "DEm", "DEs", "TT", "SFRa"]
    dtypes =           ["<U17", "<i4", "<i4", "<f8", "<U1", "<i4", "<i4", "<i4", "<i4", "<f8"]
    tbl = Table(names=wanted_col_names + ["KLum"] + ["DM"], dtype=dtypes+["<f8", "<f8"])

    for row in dist:
        if row["Name"] in sfr["Name"] and row["Name"] in phot["Name"]:
            sfr_ii = np.where(sfr["Name"] == row["Name"])[0][0]
            phot_ii = np.where(phot["Name"] == row["Name"])[0][0]
            vals = [sfr[sfr_ii][col] for col in wanted_col_names] + [phot[phot_ii]["KLum"]] + [row["DM"]]
            tbl.add_row(vals)

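    # DM is the distance modulus, so the distance is 10**((DM + 5) / 5) pc,
    # i.e. 10**(0.2*DM + 1) * 1e-6 Mpc.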
    dist_col = Column(name="Dist", data=np.round(10**(0.2*tbl["DM"]+1) * 1e-6, 3))
    sfr_col = Column(name="SFR",  data=np.round(10**(tbl["SFRa"]+tbl["KLum"]), 3))

    tbl.add_columns([dist_col, sfr_col])

    if write_to_disk:
        tbl.write(pth.join(SUP_DATA_DIR, "combined_karachentsev.dat"),
                  format="ascii.fixed_width", overwrite=True)

    return tbl
Example #2
def create_basic_table(ra_values,
                       dec_values,
                       magnitudes,
                       location_units,
                       minimum_index=1):
    """Create astropy table containing the basic catalog info
    NOTE THAT THIS IS OUTSIDE CLASSES
    """
    basic_table = Table()

    # Add index and RA, Dec (or x, y) columns
    index_col = Column(np.arange(minimum_index,
                                 minimum_index + len(ra_values)),
                       name='index')
    ra_col = Column(ra_values, name='x_or_RA')
    dec_col = Column(dec_values, name='y_or_Dec')
    basic_table.add_columns([index_col, ra_col, dec_col])

    # Add magnitude columns. All entries are assumed to share a single
    # magnitude system; the system of the last entry is recorded below.
    for key in magnitudes:
        mag_sys = magnitudes[key][0]
        mag_values = magnitudes[key][1]
        mag_column = Column(mag_values, name=key)
        basic_table.add_column(mag_column)

    # Add magnitude system and position units as metadata
    basic_table.meta['comments'] = [location_units, mag_sys]
    return basic_table
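A minimal sketch of a call, with made-up catalog values; the magnitude column
name and the 'position_RA_Dec' / 'abmag' labels are only illustrative:

import numpy as np

ra_values = np.array([10.1, 10.2, 10.3])
dec_values = np.array([-5.0, -5.1, -5.2])
# each magnitudes entry is (magnitude system, array of magnitudes)
magnitudes = {'nircam_f200w_magnitude': ('abmag', np.array([21.5, 22.0, 22.5]))}
tbl = create_basic_table(ra_values, dec_values, magnitudes, 'position_RA_Dec')
# tbl columns: index, x_or_RA, y_or_Dec, nircam_f200w_magnitude
# tbl.meta['comments'] == ['position_RA_Dec', 'abmag']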
Example #3
    def table(self):
        #@TODO: make this work for map data ?
        r'''Construct the table of input Measurements and, if the fit has been run,
        the density, radiation field, and :math:`\chi^2` values.

        :rtype: :class:`astropy.table.Table`
        '''
        v = self._measurements.values()
        # This only works for astropy version >= 4.1
        # and for some reason requirements.txt did not install it
        # for a test user.
        #t = Table(self._measurements,
        #          units=[m.unit for m in v]
        #          )
        t = Table()
        cols = [Column(data=d,unit=d.unit) for d in v]
        t.add_columns(cols=cols, names=[m.id for m in v])
        if self._observedratios is not None:
            v = self._observedratios.values()
            cols = [Column(data=d,unit=d.unit) for d in v]
            t.add_columns(cols=cols, names=[m.id for m in v])

        if self.radiation_field is not None:
            t.add_column(col=Column(self.radiation_field, unit=self.radiation_field_unit), name=self.radiation_field.id)

        if self.density is not None:
            t.add_column(col=Column(self.density, unit=self.density_unit), name=self.density.id)

        if self._chisq_min is not None:
            t.add_column(col=Column(self._chisq_min, unit=None), name="Chi-square")

        for j in t.columns:
            t[j].format = '3.2E'
        return t
Example #4
    def get_r_gtan(self, write=False):
        """
        Populates self.r with radial distance of galaxies from (xc,yc)
        and self.gtan/gcross with tangential and cross ellipticities

        Failed shape measurements with g1/g2 = -999999 get filtered out
        """

        xc = self.annular_info['nfw_center'][0]
        yc = self.annular_info['nfw_center'][1]

        #wg=(self.mu>1)
        wg = (self.g1 > -2)
        print("%d galaxies were actually lensed" % len(wg.nonzero()[0]))

        g1 = self.g1[wg]
        g2 = self.g2[wg]
        x = self.x[wg]
        y = self.y[wg]

        self.r = np.sqrt(((x - xc)**2.0) + ((y - yc)**2))

        phi = np.arctan2((y - yc), (x - xc))
        g = np.sqrt(g1**2 + g2**2)
        print("## Mean g: %f " % np.mean(g))
        self.gtan = -1.0 * (g1 * np.cos(2.0 * phi) + g2 * np.sin(2.0 * phi))
        self.gcross = g1 * np.sin(2.0 * phi) + g2 * np.cos(2.0 * phi)

        if write:
            newtab = Table()
            newtab.add_columns([x, y, self.r, self.gtan, self.gcross],
                               names=['x', 'y', 'r', 'gtan', 'gcross'])
            newtab.write('shear_inputs.csv', format='csv', overwrite=True)
        return
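A quick sanity check on the sign convention above, with made-up shear values:
a galaxy lying along the +x axis from the centre has phi = 0, so a purely
"radial" ellipticity g1 = -0.1, g2 = 0 comes out as a positive tangential term.

import numpy as np

phi = 0.0                     # galaxy due east of (xc, yc)
g1, g2 = -0.1, 0.0            # hypothetical shape measurement
gtan = -1.0 * (g1 * np.cos(2 * phi) + g2 * np.sin(2 * phi))   # +0.1
gcross = g1 * np.sin(2 * phi) + g2 * np.cos(2 * phi)          # 0.0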
Example #5
   def make_annular_cat(self, hdu_ext=2, filter_g=True, clobber=True):
      """
      make a catalog of positions and shapes
      ready for annular.c and lensing in general
      
      Filter_g : whether to clean failed fits or not
      
      """
      
      # actually load the master coadd catalog
      try:
         coadd_cat = Table.read(self.coadd_catalog,hdu=hdu_ext)
      except:
         print("wrong coadd extension, try again?")
         pdb.set_trace()

      # do matching
      fitvd_matcher=htm.Matcher(16,ra=self.master_fitvd['ra'],dec=self.master_fitvd['dec'])
      sex_ind,fitvd_ind,dist=fitvd_matcher.match(ra=coadd_cat['ALPHAWIN_J2000'],dec=coadd_cat['DELTAWIN_J2000'],maxmatch=1,radius=5.5E-4)
      for_annular=Table()
      for_annular.add_columns([coadd_cat['ALPHAWIN_J2000'][sex_ind],coadd_cat[sex_ind]['DELTAWIN_J2000'],coadd_cat[sex_ind]['X_IMAGE'],
                               coadd_cat[sex_ind]['Y_IMAGE']])
      for_annular.add_columns([self.master_fitvd[fitvd_ind]['exp_g'][:,0],self.master_fitvd[fitvd_ind]['exp_g'][:,1]],names=['g1','g2'])
      self.annular_cat = for_annular
        
      # write to file
      if filter_g==True:
         for_annular_clean=for_annular[for_annular['g1']>-10.]
         for_annular_clean.write('fitvd-out.csv',format='ascii.csv',overwrite=clobber)
      else:
         for_annular.write('fitvd-out.csv',format='ascii.csv',overwrite=clobber)
Example #6
File: spec.py Project: nhmc/LAE
    def fits_write(self, filename, header=None, overwrite=False):
        """ Write out the Spectrum as a binary FITS table with wavelength, flux,
        error and continuum columns.

        `overwrite` can be True or False.
        """
        from astropy.table import Table, Column
        # Overwrite?
        if os.path.lexists(filename) and not overwrite:
            c = raw_input('File %s exists - overwrite? (y) or n: ' % filename)
            if c != '':
                if c.strip().lower()[0] == 'n':
                    print('returning without writing anything...')
                    return
        # Generate FITS table and write
        cwa = Column(data=self.wa,name=str('wa'))
        cfl = Column(data=self.fl,name=str('fl'))
        cer = Column(data=self.er,name=str('er'))
        cco = Column(data=self.co,name=str('co'))
        sp = Table()
        #pdb.set_trace()
        sp.add_columns([cwa,cfl,cer,cco])
        sp.write(filename, format='fits',overwrite=True)
        # Save filename
        if self.filename is None:
            self.filename = filename
Example #7
def create_salp_background():
    
    #Full sample of objects should be same length as background-corrected observed: 11719
    nobjs = 1000
    e=[]

    mlisttab = Table.read('teff_spt_table.txt',format='ascii')
    fullm = np.asarray(mlisttab['Msun'][6:97])
    for i in range(len(fullm)):
        e.append(salp_imf(fullm[i],[-2.35]))

    e = [a/max(e) for a in e]
    backg_obj_masses = choices(fullm,weights=e,k=nobjs)
    mass_col = Column(backg_obj_masses,name='Mass(msol)')

    alist = np.arange(0.25,30.25,0.25)
    av_list = np.random.choice(alist,nobjs)
    #av_list = np.random.normal(10.0,2.0,nobjs)
    av_col = Column(av_list,name='RAv')

    dlist = np.arange(10.,400.,0.5)
    d = np.random.choice(dlist,nobjs)
    d_col = Column(d,name='Dist(pc)')

    t = Table()
    t.add_columns([mass_col,av_col,d_col])
    t = add_spt(t,'Msun','Mass(msol)','b')
    return t
Example #8
def make_grid():

    tab = Table.read('l17_files_qplot.txt',format='ascii')
    stds = tab['Typ']
    #av_dist = Table.read('serpens_extinction_probabilities/ss_av_converted_prob_density_higher_conversion.txt',format='ascii')

    #av_min = min(av_dist['Av Value'])
    #av_max = max(av_dist['Av Value'])
    av_min = 0.0
    av_max = 30.25
    av_list = np.arange(av_min,av_max+0.25,0.25)

    model = Table()
    typ,av,q_cols,H_mags = [],[],[],[]

    for i in range(len(stds)):
        print(i) 
        for j in range(len(av_list)):   
            kw,kj,kh,jw,hw,Hap =  find_wband_cols(-av_list[j],stds[i],'realfile')
            q_cols.append(round(jw + (1.85*hw),3))
            H_mags.append(round(Hap,3))
            typ.append(stds[i])
            av.append(av_list[j])
    
    typ_col = Column(typ,name='SpT')
    av_col = Column(av,name='RAv')
    H_apcol = Column(H_mags,name='H_app')
    q_col_col = Column(q_cols,name='Q(cols)')

    model.add_columns([typ_col,av_col,H_apcol,q_col_col])
    model.write('spt_av_grid_0-30_converted_av_higher.txt',format='ascii')

    return model 
Example #9
def _map_ids(tbl: Table) -> dict[int, int]:
    gaia_ids = np.zeros(len(tbl), dtype=np.int64)
    cel_ids = np.zeros(len(tbl), dtype=np.uint32)
    id_idx = {}

    for i, ids in enumerate(tbl['ids']):
        for ident in ids.split('|'):
            if ident.startswith('HIP'):
                hip = int(ident[3:].strip())
                assert hip not in id_idx
                id_idx[hip] = i
                cel_ids[i] = hip
            elif ident.startswith('TYC'):
                tycs = [int(t) for t in ident[3:].strip().split('-')]
                tyc = tycs[0] + tycs[1] * 10000 + tycs[2] * 1000000000
                assert tyc not in id_idx
                id_idx[tyc] = i
                if cel_ids[i] == 0:
                    cel_ids[i] = tyc
            elif ident.startswith('Gaia DR2'):
                gaia_ids[i] = int(ident[8:].strip())

    tbl.add_columns([
        MaskedColumn(data=gaia_ids, name='gaia'),
        MaskedColumn(data=cel_ids, name='hip'),
    ])

    return id_idx
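A minimal sketch of the expected input, using a made-up SIMBAD-style 'ids'
string (the identifiers are illustrative only):

from astropy.table import Table

tbl = Table({'ids': ['HIP 12345|TYC 1234-5678-1|Gaia DR2 123456789012345678']})
id_idx = _map_ids(tbl)
# id_idx maps 12345 (HIP) and 1056781234 (packed TYC) to row 0, and tbl gains
# 'gaia' and 'hip' columns; the 'hip' slot keeps the HIP number because it was
# seen before the TYC identifier.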
Example #10
def readStack(stack,line):
    cols=[]
    nrow=len(stack[line+'_stack_sum'])
    rows=[]
    for key in stack.keys():
        data=stack[key]
        if key=='spectral_axis':
            col=Column(name=key,dtype=float, shape=data.shape)
        elif key == 'bin_type' or key=='bin_unit':
            col=Column(name=key,dtype='U10')
        elif key == line+'_stack_profile':
            col=Column(name=key, dtype=float, shape=data.shape[1])
        else:
            col=Column(name=key, dtype=float)
        cols.append(col)
    tab=Table()
    tab.add_columns(cols)
    for i in range(nrow):
        row=[]
        for key in stack.keys():
            data=stack[key]
            if key=='spectral_axis' or key=='bin_type' or key=='bin_unit':
                r=data
            elif key==line+'_stack_profile':
                r=data[i,:]
            else:
                r=data[i]
            row.append(r)
        tab.add_row(row)
    return tab
Example #11
def add_spec(data):
    tab = Table(data)
    names = ['TEFF_SPEC', 'LOGG_SPEC']
    newcols = Table(np.zeros([len(tab), len(names)]) - 9999., names=names)
    tab.add_columns(newcols.columns.values())
    tab['TEFF_SPEC'] = tab['FPARAM'][:, 0]
    tab['LOGG_SPEC'] = tab['FPARAM'][:, 1]

    return (tab)
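A minimal sketch of a call, using a hypothetical structured array that carries
an FPARAM block (the field names and sizes are illustrative):

import numpy as np

data = np.zeros(3, dtype=[('APOGEE_ID', 'U18'), ('FPARAM', 'f8', (7,))])
data['FPARAM'][:, 0] = [4750., 4900., 5100.]   # effective temperature
data['FPARAM'][:, 1] = [2.3, 2.5, 2.7]         # log g
tab = add_spec(data)
# TEFF_SPEC and LOGG_SPEC are now copies of FPARAM[:, 0] and FPARAM[:, 1]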
Example #12
def write_index_file(info_table):
    """
    Write a summary index file in tabular format from ``info_table`` dict, e.g.

        updated_file       baseline_file           date       dy_acis_i dz_acis_i dy_acis_s dz_acis_s
    ------------------- ------------------- ----------------- --------- --------- --------- ---------
    CHARACTERIS_12OCT15 CHARACTERIS_12MAR15 2015:285:01:21:25     -5.00     10.00     15.00    -20.00
    """
    index_file = os.path.join(opt.data_root, 'characteristics', 'index')
    if os.path.exists(index_file):
        index = Table.read(index_file, format='ascii.fixed_width_two_line', guess=False)
        matching = index['updated_file'] == info_table['updated_file']
        index = index[~matching]
        if np.any(matching):
            logger.info('WARNING: replacing existing entry for updated_file={}'
                        .format(info_table['updated_file']))
        shutil.copy(index_file, index_file + '.bak')
    else:
        index = Table(names=['updated_file', 'baseline_file', 'date',
                             'dy_acis_i', 'dz_acis_i', 'dy_acis_s', 'dz_acis_s'],
                      dtype=['S19', 'S19', 'S17', 'f', 'f', 'f', 'f'])
    index.add_row(info_table)
    for colname in ('dy_acis_i', 'dz_acis_i', 'dy_acis_s', 'dz_acis_s'):
        index[colname].format = '.2f'

    logger.info('Writing index file {}'.format(index_file))
    index.write(index_file, format='ascii.fixed_width_two_line')

    # Write an HTML index file as a table
    def self_link(vals):
        """Turn vals into a list of self-referenced links"""
        return ['<a href="{0}">{0}</a>'.format(val) for val in vals]

    # Turn table entries into HTML links to same
    diffs = self_link(x + '_diff.html' for x in index['updated_file'])
    jsons = self_link(x + '.json' for x in index['updated_file'])
    updated_files = self_link(index['updated_file'])
    baseline_files = self_link(index['baseline_file'])
    index.remove_columns(['updated_file', 'baseline_file'])
    index.add_columns([Column(updated_files, name='updated_file'),
                       Column(baseline_files, name='baseline_file'),
                       Column(jsons, name='JSON_info'),
                       Column(diffs, name='diff')],
                      [0, 0, 0, 0])

    index_file = index_file + '.html'
    logger.info('Writing index.html file {}'.format(index_file))
    if os.path.exists(index_file):
        shutil.copy(index_file, index_file + '.bak')
    index.write(index_file, format='ascii.html')

    # Hack: undo the HTML escaping that table write does.
    # TODO: just do the whole thing with jinja template.
    lines = (re.sub(r'&gt;', '>', line) for line in open(index_file, 'r'))
    lines = [re.sub(r'&lt;', '<', line) for line in lines]
    with open(index_file, 'w') as fh:
        fh.writelines(lines)
Example #13
    def add_gaussian_sources(self, within=(0, 1), cat_gen=pos_uniform, **kwargs):
        """Add gaussian sources into the map.

        Parameters
        ----------
        within : tuple of 2 float
            force the sources within this relative range in the map
        cat_gen : function (`pos_uniform`|`pos_gridded`|`pos_list`|...)
            the function used to generate the pixel positions and flux of the sources (see Notes below)
        **kwargs
            any keyword arguments to be passed to the `cat_gen` function

        Notes
        -----
        the `cat_gen` function is used to generate the list of x, y pixel positions and fluxes
        and must at least support the `shape=None, within=(0, 1), mask=None` arguments.
        """
        shape = self.shape

        x_mean, y_mean, peak_flux = cat_gen(shape=shape, within=within, mask=self.mask, **kwargs)

        nsources = x_mean.shape[0]

        sources = Table(masked=True)

        sources["amplitude"] = peak_flux.to(self.unit * u.beam)

        sources["x_mean"] = x_mean
        sources["y_mean"] = y_mean

        sources["x_stddev"] = np.ones(nsources) * self.beam.sigma_pix.value
        sources["y_stddev"] = np.ones(nsources) * self.beam.sigma_pix.value
        sources["theta"] = np.zeros(nsources)

        # Crude check to be within the finite part of the map
        if self.mask is not None:
            within_coverage = ~self.mask[sources["y_mean"].astype(int), sources["x_mean"].astype(int)]
            sources = sources[within_coverage]

        # Gaussian sources...
        self._data += make_gaussian_sources_image(shape, sources)

        # Add an ID column
        sources.add_column(Column(np.arange(len(sources)), name="fake_id"), 0)

        # Transform pixel to world coordinates
        a, d = self.wcs.pixel_to_world_values(sources["x_mean"], sources["y_mean"])
        sources.add_columns([Column(a * u.deg, name="ra"), Column(d * u.deg, name="dec")])

        sources["_ra"] = sources["ra"]
        sources["_dec"] = sources["dec"]

        # Remove unnecessary columns
        sources.remove_columns(["x_mean", "y_mean", "x_stddev", "y_stddev", "theta"])

        self.fake_sources = sources
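The cat_gen callable only has to honour the shape/within/mask keywords described
in the Notes above and return pixel positions plus peak fluxes. A minimal uniform
generator might look like the sketch below; the name, the nsources default and
the Jy flux level are assumptions, not part of the original API.

import numpy as np
import astropy.units as u

def pos_uniform_sketch(nsources=10, peak=1 * u.Jy, shape=None, within=(0, 1), mask=None):
    """Draw nsources random pixel positions inside the relative `within` range.

    `mask` is accepted for API compatibility but ignored in this sketch.
    """
    ny, nx = shape
    x_mean = np.random.uniform(within[0] * nx, within[1] * nx, nsources)
    y_mean = np.random.uniform(within[0] * ny, within[1] * ny, nsources)
    peak_flux = np.ones(nsources) * peak
    return x_mean, y_mean, peak_flux

# e.g. fake_map.add_gaussian_sources(cat_gen=pos_uniform_sketch, nsources=20)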
Example #14
def build_data_array(dirname):
    colnames = ['PA', 'ELLIP', 'STOP']
    buffsize = 100
    d = Table()
    d.add_column(Column(name='SMA', data=arange(0, buffsize*2, 2)))
    for i, fn in enumerate(glob(dirname+'/*.dat')):
        t = read_ellipse_output(fn)
        if i == 0:
            d.add_columns([Column(name=name, data=zeros([buffsize, 100])-99) for name in colnames])
        assert array_equiv(d['SMA'][:len(t)].data, t['SMA'].data), "SMA array do not match"
        for col in colnames:
            d[col][:len(t),i] = t[col]
    return d
Example #15
    def test_quick_add_keys_records_history(self, keyword_arg, file_arg,
                                            file_column):
        ic = ImageFileCollection(self.test_dir.strpath, keywords=['imagetyp'])
        ic.summary.keep_columns('file')

        file_list = os.path.join(ic.location, 'files.txt')
        keyword_list = os.path.join(ic.location, 'keys.txt')

        full_paths = [
            os.path.join(self.test_dir.strpath, fil)
            for fil in ic.summary['file']
        ]
        print('full paths: %s' % ' '.join(full_paths))
        ic.summary['file'][:] = full_paths
        ic.summary.remove_column('file')
        ic.summary.add_column(Column(data=full_paths, name=file_column))
        ic.summary.write(file_list, format='ascii')
        if file_column != 'file':
            ic.summary.rename_column(file_column, 'file')
        dumb_keyword = 'munkeez'.upper()
        dumb_value = 'bananaz'
        keywords = Column(data=[dumb_keyword], name='Keyword')
        vals = Column(data=[dumb_value], name='value')
        keyword_table = Table()
        keyword_table.add_columns([keywords, vals])
        keyword_table.write(keyword_list, format='ascii')
        args_for = {}
        args_for['--key-file'] = [keyword_list]
        args_for['--key-value'] = [dumb_keyword, dumb_value]
        args_for['--file-list'] = [file_list]
        args_for[''] = full_paths
        argslist = [keyword_arg]
        argslist.extend(args_for[keyword_arg])
        if file_arg:
            argslist.append(file_arg)
        argslist.extend(args_for[file_arg])
        if file_column.lower() != 'file' and file_arg:
            with pytest.raises(ValueError):
                quick_add_keys_to_file.main(argslist)
            return
        else:
            quick_add_keys_to_file.main(argslist)


#        add_keys(file_list=file_list, key_file=keyword_list)
        for header in ic.headers():
            assert (header[dumb_keyword] == dumb_value)
            history_string = ' '.join(header['history'])
            assert (dumb_keyword in history_string)
            assert (dumb_value in history_string)
Example #16
def output_j19_comparison(sbid, gaskap_table, j19_table, idx_j19, d2d_j19, j19_match, j19_unmatched, title, filename, match_cat=None): 
    print (title, filename)

    gaskap_targets = gaskap_table[j19_match]
    j19_targets = j19_table[idx_j19]
    j19_targets = j19_targets[j19_match]
    sort_order = gaskap_targets.argsort(['comp_name'])
    #comp_names = sorted(targets['comp_name'])
    gaskap_tgt_ordered = gaskap_targets[sort_order]
    j19_tgt_ordered = j19_targets[sort_order]

    with open(filename, 'w') as f:
        output_header(f, title)

        for rating in 'ABCDEF':
            mask = gaskap_tgt_ordered['rating']==rating
            subset = gaskap_tgt_ordered[mask]
            j19_subset = j19_tgt_ordered[mask]
            print('Rating {} has {} spectra'.format(rating, len(subset)))

            output_block_title(f, rating, rating=='A', len(subset))

            for idx, gaskap_src in enumerate(subset):
                gaskap_name  = gaskap_src['comp_name']
                j19_name = j19_subset[idx]['Source']
                output_j19_img(f, gaskap_name, j19_name, rating)

        # Add a section for missed spectra
        output_block_title(f, None, False, len(j19_unmatched))
        for row in j19_unmatched:
            gaskap_name  = row['closest_gaskap']
            j19_name = row['Source']
            output_j19_img(f, gaskap_name, j19_name, rating, sep=row['gaskap_sep'])

        output_footer(f)

    if match_cat:
        augmented_table = Table(gaskap_tgt_ordered)
        close_j19_comp_names = j19_tgt_ordered['Source']
        col_closest = Column(name='closest_j19', data=close_j19_comp_names)
        col_gaskap_ra = Column(name='j19_ra', data=j19_tgt_ordered['ra']*u.deg)
        col_gaskap_dec = Column(name='j19_dec', data=j19_tgt_ordered['dec']*u.deg)
        sep_vals = d2d_j19[j19_match]
        sep_vals_sorted = sep_vals[sort_order]
        col_sep = Column(name='j19_sep', data=sep_vals_sorted.to(u.arcsec))
        augmented_table.add_columns([col_closest, col_gaskap_ra, col_gaskap_dec, col_sep])
        #print (augmented_table)
        j19_match_vo_table = from_table(augmented_table)
        writeto(j19_match_vo_table, match_cat)
Example #17
    def test_quick_add_keys_records_history(self, keyword_arg,
                                            file_arg, file_column):
        ic = ImageFileCollection(self.test_dir.strpath,
                                 keywords=['imagetyp'])
        ic.summary_info.keep_columns('file')

        file_list = os.path.join(ic.location, 'files.txt')
        keyword_list = os.path.join(ic.location, 'keys.txt')

        full_paths = [os.path.join(self.test_dir.strpath, fil) for
                      fil in ic.summary_info['file']]
        print('full paths: %s' % ' '.join(full_paths))
        ic.summary_info['file'][:] = full_paths
        ic.summary_info.remove_column('file')
        ic.summary_info.add_column(Column(data=full_paths, name=file_column))
        ic.summary_info.write(file_list, format='ascii')
        if file_column != 'file':
            ic.summary_info.rename_column(file_column, 'file')
        dumb_keyword = 'munkeez'.upper()
        dumb_value = 'bananaz'
        keywords = Column(data=[dumb_keyword], name='Keyword')
        vals = Column(data=[dumb_value], name='value')
        keyword_table = Table()
        keyword_table.add_columns([keywords, vals])
        keyword_table.write(keyword_list, format='ascii')
        args_for = {}
        args_for['--key-file'] = [keyword_list]
        args_for['--key-value'] = [dumb_keyword, dumb_value]
        args_for['--file-list'] = [file_list]
        args_for[''] = full_paths
        argslist = [keyword_arg]
        argslist.extend(args_for[keyword_arg])
        if file_arg:
            argslist.append(file_arg)
        argslist.extend(args_for[file_arg])
        if file_column.lower() != 'file' and file_arg:
            with pytest.raises(ValueError):
                quick_add_keys_to_file.main(argslist)
            return
        else:
            quick_add_keys_to_file.main(argslist)

#        add_keys(file_list=file_list, key_file=keyword_list)
        for header in ic.headers():
            assert (header[dumb_keyword] == dumb_value)
            history_string = ' '.join(header['history'])
            assert (dumb_keyword in history_string)
            assert (dumb_value in history_string)
Example #18
def make_output_table(target_list, wds_id_list):
    """Construct an astropy Table from the target ID
    list and the filtered WDS component list
    """
    output_table = Table()
    target_column = Column(
        data=target_list,
        description='Original user-specified target identifier',
        name='Star',
        format='{}')
    wds_column = Column(data=wds_id_list,
                        description='WDS identifiers for selected components',
                        name='WDS',
                        format='{}')
    output_table.add_columns(cols=[target_column, wds_column])
    return output_table
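A minimal sketch of a call, with made-up identifiers:

targets = ['HD 1', 'HD 2']
wds_ids = ['00001+0001A', '00001+0001B']   # illustrative WDS designations
tbl = make_output_table(targets, wds_ids)
# tbl has 'Star' and 'WDS' columns preserving the input order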
Example #19
def make_flux_table(source_tbl, src, wave_min, wave_max, area):
    fluxes = np.zeros(len(src.spectra))
    ref_set = list(set(source_tbl["ref"]))
    flux_set = src.photons_in_range(wave_min, wave_max, area, ref_set)
    fluxes[ref_set] = flux_set

    ref = source_tbl["ref"]
    weight = source_tbl["weight"]
    flux_col = Column(name="flux", data=fluxes[ref] * weight)
    x_col = source_tbl["x"]
    y_col = source_tbl["y"]

    tbl = Table()
    tbl.add_columns([x_col, y_col, flux_col])

    return tbl
Example #20
class Parser(object):
    """Read in a MOP formatted file"""

    def __init__(self, expnum, ccd, extension, type='p', prefix=None):
        """does nothing"""

        self.expnum = expnum
        self.ccd = ccd
        self.extension = extension
        self.type = type
        self.prefix = prefix
        self.fobj = None
        self.keywords  = []
        self.values = []
        self.formats = {}
        self.header = {}
        self.cdata ={}
        

    def parse(self):
        """read in a file and return a MOPFile object."""
        self.filename =  storage.get_file(self.expnum,
                                       self.ccd,
                                       ext=self.extension,
                                       version=self.type,
                                       prefix=self.prefix)
        self.fobj = open(self.filename,'r')
        lines = self.fobj.read().split('\n')
        self.header = HeaderParser(self.extension).parser(lines)
        if 'matt' in self.extension:
            usecols=[0,1,2,3,4]
            data = numpy.genfromtxt(self.filename, usecols=usecols)
        else:
            data = numpy.genfromtxt(self.filename)
        self.data = Table(data, names=self.header.column_names[0:data.shape[1]])
        ast_header = storage.get_astheader(self.expnum, self.ccd)
        self.wcs = wcs.WCS(ast_header)
        flip_these_extensions = list(range(1, 19))
        flip_these_extensions.append(37)
        flip_these_extensions.append(38)
        if self.ccd + 1 in flip_these_extensions:
            self.data['X'] = float(self.header.keywords['NAX1'][0])-self.data['X'] + 1
            self.data['Y'] = float(self.header.keywords['NAX2'][0])-self.data['Y'] + 1
        ra, dec = self.wcs.xy2sky(self.data['X'], self.data['Y'], usepv=True)

        self.data.add_columns([Column(ra, name='RA_J2000'), Column(dec, name='DE_J2000')])
        return self
Example #21
def _write_ref_ISM_table():
    """ Write a reference table enabling faster I/O for ISM-related lists

    For developer use only.

    Note that after running this, you need to manually copy the table
    produced to linetools/data/lines/ISM_table.fits inside the github
    repository, and then check it in.
    """
    from linetools.lists.linelist import LineList

    ism = LineList('ISM', use_ISM_table=False)
    strong = LineList('Strong', use_ISM_table=False)
    euv = LineList('EUV', use_ISM_table=False)
    hi = LineList('HI', use_ISM_table=False)

    # need a Table, not QTable to write
    tab = Table(ism._data.copy())
    tab.sort(('wrest'))

    # Using np.in1d doesn't work for some reason. Do it the long way
    cond = []
    for table in (strong, euv, hi):
        igood = []
        for row in Table(table._data):
            ind = tab['wrest'].searchsorted(row['wrest'])
            dw = abs(tab['wrest'][ind] - row['wrest'])
            if abs(tab['wrest'][ind+1] - row['wrest']) < dw:
                ind = ind + 1
            rism = tab[ind]
            # check this is the right row.
            if all(row[k] == rism[k] for k in hi._data.colnames if
                   not hasattr(row[k], 'mask') or not row[k].mask):
                igood.append(ind)
            else:
                raise RuntimeError('No match found!')

        cond.append(np.zeros(len(tab), dtype=bool))
        cond[-1][np.array(igood)] = True

    col1 = Column(cond[0], name='is_Strong')
    col2 = Column(cond[1], name='is_EUV')
    col3 = Column(cond[2], name='is_HI')
    tab.add_columns([col1, col2, col3])

    tab.write('ISM_table.fits', overwrite=True)
Example #23
def list_filters():
    """Print the list of filters in the pcigale database.
    """
    with Database() as base:
        filters = {name: base.get_filter(name) for name in
                   base.get_filter_names()}

    name = Column(data=[filters[f].name for f in filters], name='Name')
    description = Column(data=[filters[f].description for f in filters],
                         name='Description')
    wl = Column(data=[filters[f].effective_wavelength for f in filters],
                name='Effective Wavelength', unit=u.nm, format='%d')
    filter_type = Column(data=[filters[f].trans_type for f in filters],
                         name='Type')
    samples = Column(data=[filters[f].trans_table[0].size for f in filters],
                     name="Points")

    t = Table()
    t.add_columns([name, description, wl, filter_type, samples])
    t.sort(['Effective Wavelength'])
    t.pprint(max_lines=-1, max_width=-1)
Example #24
def tableFill(dam, ra, dec, appender, nme):
    t = Table(None)
    Am = Column(name='Arcminute')
    North = Column(name='North')
    East = Column(name='East')
    South = Column(name='South')
    West = Column(name='West')
    t.add_columns([Am, North, East, South, West])
    tA_v = []
    curVal = [None] * 4  #n = 0, e = 1, s = 2, w = 3
    coord = [None] * 4  #n = 0, e = 1, s = 2, w = 3
    #get values for each arcminute
    for j in range(0, dam + 1):
        fourCoord(j, ra, dec, coord)
        t.add_row()
        t[j][0] = j
        for i in range(0, 4):
            C = coordinates.SkyCoord(coord[i])
            table = IrsaDust.get_extinction_table(C.fk5, show_progress=False)
            curVal[i] = (table['A_SandF'][2])
            t[j][i + 1] = curVal[i]
            curVal = curVal[:]
        tA_v.append(curVal)

    t.add_row()
    # add a blank row to the table to separate queries
    for i in range(0, 5):
        t[j + 1][i] = None
    n = [nme]
    namesTable = Table([n], names=('n',))
    final_name = namesTable.to_pandas()
    final_vals = t.to_pandas()
    from pandas import ExcelWriter
    with open('A_v Values.csv', appender) as f:
        final_name.to_csv(f, header=False, index=False)
    appender = 'a'
    with open('A_v Values.csv', appender) as f:
        final_vals.to_csv(f, header=True, index=False, sep=',')
    return (tA_v)  #gets the data from IRSA database and stores A_v in array
Example #25
def get_ellipticities(incat,all_ind,truth_cat,truth_ind):

    
    e1= (incat['X2_IMAGE']-incat['Y2_IMAGE']) / (incat['X2_IMAGE']+incat['Y2_IMAGE'])
    e2=-2.0*incat['XY_IMAGE']/(incat['X2_IMAGE']+incat['Y2_IMAGE'])
    ellip=np.sqrt(e1**2+e2**2)
    
    e1=e1[all_ind]
    e1.name='$e_1$'
    e2=e2[all_ind]
    e2.name='$e_2$'
    ellip=np.sqrt(e1**2+e2**2)
    ellip.name='$e$'
    ra = incat['ALPHAWIN_J2000'][all_ind]; dec = incat['DELTAWIN_J2000'][all_ind]
    x = incat['X_IMAGE'][all_ind]; y=incat['Y_IMAGE'][all_ind]
    fwhm = incat['FWHM_IMAGE'][all_ind]
    mag = incat['MAG_AUTO'][all_ind]
    flux = incat['FLUX_AUTO'][all_ind]
    flux_rad = incat['FLUX_RADIUS'][all_ind]

    # copy over relevant quantities from truth catalog as well!
    truth_fwhm = truth_cat[truth_ind]['mom_size']*2.355
    truth_flux = truth_cat[truth_ind]['flux']
    truth_g1_meas = truth_cat[truth_ind]['g1_meas']
    truth_g2_meas = truth_cat[truth_ind]['g2_meas']
    z =truth_cat[truth_ind]['redshift']
    
    
    # also add fwhm and mag, useful quantities for stars,

    ellip_tab=Table()
    ellip_tab.add_columns([ra, dec, x, y, e1,e2,ellip,truth_g1_meas,truth_g2_meas,
                               fwhm,flux_rad, mag,flux,truth_fwhm, truth_flux,z],
                               names=['ra','dec','x', 'y', 'e1','e2','e','g1_meas','g2_meas',
                                         'fwhm','flux_rad','mag','flux','truth_fwhm','truth_flux','redshift'])
   

    return ellip_tab
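A quick numeric check of the second-moment definition above, with made-up
moments: an image with X2 = 2, Y2 = 1 and XY = 0 is elongated along x.

x2, y2, xy = 2.0, 1.0, 0.0          # hypothetical second moments
e1 = (x2 - y2) / (x2 + y2)          # 1/3
e2 = -2.0 * xy / (x2 + y2)          # 0.0
e = (e1**2 + e2**2) ** 0.5          # 1/3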
Example #26
    def save(self, save_path: Path = '.'):
        save_path = Path(save_path)
        npar = len(self.ps)

        if self.de:
            de = xa.DataArray(self.de.population, dims='pvector parameter'.split(), coords={'parameter': self.ps.names})
        else:
            de = None

        if self.sampler is not None:
            mc = xa.DataArray(self.sampler.chain, dims='pvector step parameter'.split(),
                              coords={'parameter': self.ps.names}, attrs={'ndim': npar, 'npop': self.sampler.nwalkers})
        else:
            mc = None

        ds = xa.Dataset(data_vars={'de_population': de, 'mcmc_samples': mc},
                        attrs={'created': strftime('%Y-%m-%d %H:%M:%S'), 'name': self.name})
        ds.to_netcdf(save_path.joinpath(f'{self.name}.nc'))

        try:
            if self.sampler is not None:
                fname = save_path / f'{self.name}.fits'
                chains = self.sampler.chain
                nchains = chains.shape[0]
                nsteps = chains.shape[1]
                idch = repeat(arange(nchains), nsteps)
                idst = tile(arange(nsteps), nchains)
                flc = chains.reshape([-1, chains.shape[2]])
                tb1 = Table([idch, idst], names=['chain', 'step'])
                tb1.add_columns(flc.T, names=self.ps.names)
                tb2 = Table([idch, idst], names=['chain', 'step'])
                tb2.add_column(self.sampler.lnprobability.ravel(), name='lnp')
                tbhdu1 = pf.BinTableHDU(tb1, name='posterior')
                tbhdu2 = pf.BinTableHDU(tb2, name='sample_stats')
                hdul = pf.HDUList([pf.PrimaryHDU(), tbhdu1, tbhdu2])
                hdul.writeto(fname, overwrite=True)
        except ValueError:
            print('Could not save the samples in fits format.')
Example #27
def match_planted(fk_candidate_observations,
                  match_filename,
                  bright_limit=BRIGHT_LIMIT,
                  object_planted=OBJECT_PLANTED,
                  minimum_bright_detections=MINIMUM_BRIGHT_DETECTIONS,
                  bright_fraction=MINIMUM_BRIGHT_FRACTION):
    """
    Using the fk_candidate_observations as input get the Object.planted file from VOSpace and match
    planted sources with found sources.

    The Object.planted list is pulled from VOSpace based on the standard file-layout and name of the
    first exposure as read from the .astrom file.

    :param fk_candidate_observations: name of the fk*reals.astrom file to check against Object.planted
    :param match_filename: a file that will contain a list of all planted sources and the matched found source
    :param minimum_bright_detections: if there are too few bright detections we raise an error.

    """

    found_pos = []
    detections = fk_candidate_observations.get_sources()
    for detection in detections:
        reading = detection.get_reading(0)
        # create a list of positions, to be used later by match_lists
        found_pos.append([reading.x, reading.y])

    # Now get the Object.planted file, either from the local FS or from VOSpace.
    objects_planted_uri = object_planted
    if not os.access(objects_planted_uri, os.F_OK):
        objects_planted_uri = fk_candidate_observations.observations[
            0].get_object_planted_uri()
    try:
        lines = storage.open_vos_or_local(objects_planted_uri)
        lines = lines.read().decode('utf-8')
    except Exception as ex:
        logging.critical(f'{ex}')
        print(lines)
        raise ex

    # we are changing the format of the Object.planted header to be compatible with astropy.io.ascii but
    # there are some old Object.planted files out there so we do these string/replace calls to reset those.
    new_lines = lines.replace("pix rate", "pix_rate")
    new_lines = new_lines.replace("""''/h rate""", "sky_rate")
    planted_objects_table = ascii.read(new_lines,
                                       header_start=-1,
                                       data_start=0)
    planted_objects_table.meta = None
    # The match_list method expects a list that contains a position, not an x and a y vector, so we transpose.
    planted_pos = numpy.transpose(
        [planted_objects_table['x'].data, planted_objects_table['y'].data])
    # match_idx is an order list.  The list is in the order of the first list of positions and each entry
    # is the index of the matching position from the second list.
    (match_idx, match_fnd) = util.match_lists(numpy.array(planted_pos),
                                              numpy.array(found_pos))
    assert isinstance(match_idx, numpy.ma.MaskedArray)
    assert isinstance(match_fnd, numpy.ma.MaskedArray)
    false_positives_table = Table()
    # Once we've matched the two lists we'll need some new columns to store the information in.
    # these are masked columns so that object.planted entries that have no detected match are left 'blank'.
    new_columns = [
        MaskedColumn(name="measure_x",
                     length=len(planted_objects_table),
                     mask=True),
        MaskedColumn(name="measure_y",
                     length=len(planted_objects_table),
                     mask=True),
        MaskedColumn(name="measure_rate",
                     length=len(planted_objects_table),
                     mask=True),
        MaskedColumn(name="measure_angle",
                     length=len(planted_objects_table),
                     mask=True),
        MaskedColumn(name="measure_mag1",
                     length=len(planted_objects_table),
                     mask=True),
        MaskedColumn(name="measure_merr1",
                     length=len(planted_objects_table),
                     mask=True),
        MaskedColumn(name="measure_mag2",
                     length=len(planted_objects_table),
                     mask=True),
        MaskedColumn(name="measure_merr2",
                     length=len(planted_objects_table),
                     mask=True),
        MaskedColumn(name="measure_mag3",
                     length=len(planted_objects_table),
                     mask=True),
        MaskedColumn(name="measure_merr3",
                     length=len(planted_objects_table),
                     mask=True)
    ]
    planted_objects_table.add_columns(new_columns)
    tlength = 0
    new_columns = [
        MaskedColumn(name="measure_x", length=tlength, mask=True),
        MaskedColumn(name="measure_y", length=tlength, mask=True),
        MaskedColumn(name="measure_rate", length=0, mask=True),
        MaskedColumn(name="measure_angle", length=0, mask=True),
        MaskedColumn(name="measure_mag1", length=0, mask=True),
        MaskedColumn(name="measure_merr1", length=0, mask=True),
        MaskedColumn(name="measure_mag2", length=0, mask=True),
        MaskedColumn(name="measure_merr2", length=0, mask=True),
        MaskedColumn(name="measure_mag3", length=tlength, mask=True),
        MaskedColumn(name="measure_merr3", length=tlength, mask=True)
    ]
    false_positives_table.add_columns(new_columns)

    # We do some 'checks' on the Object.planted match to diagnose pipeline issues.  Those checks are made using just
    # those planted sources we should have detected.
    bright = planted_objects_table['mag'] < bright_limit
    n_bright_planted = numpy.count_nonzero(
        planted_objects_table['mag'][bright])

    measures = []
    idxs = []
    for idx in range(len(match_idx)):
        # The match_idx value is False if nothing was found.
        if not match_idx.mask[idx]:
            # Each 'source' has multiple 'readings'
            measures.append(detections[match_idx[idx]].get_readings())
            idxs.append(idx)

    observations = measure_mags(measures)

    for oidx in range(len(measures)):
        idx = idxs[oidx]
        readings = measures[oidx]
        start_jd = Time(readings[0].obs.header['MJD_OBS_CENTER'],
                        format='mpc',
                        scale='utc').jd
        end_jd = Time(readings[-1].obs.header['MJD_OBS_CENTER'],
                      format='mpc',
                      scale='utc').jd
        rate = math.sqrt(
            (readings[-1].x - readings[0].x)**2 +
            (readings[-1].y - readings[0].y)**2) / (24 * (end_jd - start_jd))
        rate = int(rate * 100) / 100.0
        angle = math.degrees(
            math.atan2(readings[-1].y - readings[0].y,
                       readings[-1].x - readings[0].x))
        angle = int(angle * 100) / 100.0
        planted_objects_table[idx]['measure_rate'] = rate
        planted_objects_table[idx]['measure_angle'] = angle
        planted_objects_table[idx]['measure_x'] = observations[
            readings[0].obs]['mags']["XCENTER"][oidx]
        planted_objects_table[idx]['measure_y'] = observations[
            readings[0].obs]['mags']["YCENTER"][oidx]
        for ridx in range(len(readings)):
            reading = readings[ridx]
            mags = observations[reading.obs]['mags']
            planted_objects_table[idx]['measure_mag{}'.format(
                ridx + 1)] = mags["MAG"][oidx]
            planted_objects_table[idx]['measure_merr{}'.format(
                ridx + 1)] = mags["MERR"][oidx]

    # for idx in range(len(match_fnd)):
    #     if match_fnd.mask[idx]:
    #         measures = detections[idx].get_readings()
    #         false_positives_table.add_row()
    #         false_positives_table[-1] = measure_mags(measures, false_positives_table[-1])

    # Count an object as detected if it has a measured magnitude in the first frame of the triplet.
    n_bright_found = numpy.count_nonzero(
        planted_objects_table['measure_mag1'][bright])
    # Also compute the offset and standard deviation of the measured magnitude from that planted ones.
    offset = numpy.mean(planted_objects_table['mag'][bright] -
                        planted_objects_table['measure_mag1'][bright])
    try:
        offset = "{:5.2f}".format(offset)
    except:
        offset = "indef"

    std = numpy.std(planted_objects_table['mag'][bright] -
                    planted_objects_table['measure_mag1'][bright])
    try:
        std = "{:5.2f}".format(std)
    except:
        std = "indef"

    if os.access(match_filename, os.R_OK):
        fout = open(match_filename, 'a')
    else:
        fout = open(match_filename, 'w')

    fout.write("#K {:10s} {:10s}\n".format("EXPNUM", "FWHM"))
    for measure in detections[0].get_readings():
        fout.write('#V {:10s} {:10s}\n'.format(measure.obs.header['EXPNUM'],
                                               measure.obs.header['FWHM']))

    fout.write("#K ")
    for keyword in ["RMIN", "RMAX", "ANGLE", "AWIDTH"]:
        fout.write("{:10s} ".format(keyword))
    fout.write("\n")

    fout.write("#V ")
    for keyword in ["RMIN", "RMAX", "ANGLE", "AWIDTH"]:
        fout.write("{:10s} ".format(
            fk_candidate_observations.sys_header[keyword]))
    fout.write("\n")

    fout.write("#K ")
    for keyword in ["NBRIGHT", "NFOUND", "OFFSET", "STDEV"]:
        fout.write("{:10s} ".format(keyword))
    fout.write("\n")
    fout.write("#V {:<10} {:<10} {:<10} {:<10}\n".format(
        n_bright_planted, n_bright_found, offset, std))

    try:
        writer = ascii.FixedWidth
        # add a hash to the start of line that will have header columns: for JMP
        fout.write("# ")
        fout.flush()
        ascii.write(planted_objects_table,
                    output=fout,
                    Writer=writer,
                    delimiter=None)
        if len(false_positives_table) > 0:
            with open(match_filename + ".fp", 'a') as fpout:
                fpout.write("#")
                ascii.write(false_positives_table,
                            output=fpout,
                            Writer=writer,
                            delimiter=None)
    except Exception as e:
        logging.error(str(e))
        raise e
    finally:
        fout.close()

    # Some simple checks to report a failure how we're doing.
    if n_bright_planted < minimum_bright_detections:
        raise RuntimeError(1, "Too few bright objects planted.")

    if n_bright_found / float(n_bright_planted) < bright_fraction:
        raise RuntimeError(2, "Too few bright objects found.")

    return "{} {} {} {}".format(n_bright_planted, n_bright_found, offset, std)
Example #28
def main():
	epilog='''The SCUSS pipeline is designed for images taken by the Bok Telescope.
	It makes photometry for both SDSS-detected objects and Bok-only sources.
	The photometric methods include automatic aperture, circular aperture, PSF
	and model photometry. The model photometry can use SDSS shape parameters for
	forced photometry or use self-guessed parameters to refit. The two models are
	the Dev (1/4) and Exp laws. Gain and read-out noise are read from the FITS header.
	If a stacked image, only a weight image should be provided.
	If a single-epoch image, only a flag image should be provided.
	If weight, flag, psfex or sdss is empty, the program will automatically
	find or calculate them.'''

	parser = argparse.ArgumentParser(description='SCUSS Photometric Pipeline.',
			epilog=epilog,fromfile_prefix_chars='@')
	parser.add_argument('fits', metavar='FITS', type=str, nargs='*',
			help='Bok calibrated images to do photometry. @filelist to read from file')
	parser.add_argument('--stacked', action='store_true',
			help='indicate FITS are stacked image')
	parser.add_argument('-f','--flag',type=str,nargs='*',default=[],
			help='flag image (bad pixels). @filelist to read from file')
	parser.add_argument('-w','--weight',type=str,nargs='*',default=[],
			help='weight image (exposure times for mosaics). @filelist to read from file')
	parser.add_argument('--psfex',type=str,nargs='*',metavar='psfex',default=[],
			help='PSF file derived by PSFEx')
	parser.add_argument('--phot-sdss', action='store_true',
			help='photometry with SDSS detections with automatic parameters')
	parser.add_argument('--force-sdss', action='store_true',
			help='force SDSS shape parameters on Bok image.')
	parser.add_argument('--force-filter',type=str,default=None,
			help='Force photometry with SDSS filter shape parameters, default None (use the same filter as the image)')
	parser.add_argument('--save-sky', action='store_true',
			help='save the 2d sky background and rms map')
	parser.add_argument('--save-segment', action='store_true',
			help='save the segmentation map')
	parser.add_argument('--save-model', action='store_true',
			help='save the fitted model and residual image')
	parser.add_argument('--plot', action='store_true',
			help='plot all kinds of corrections in the catalog')
	parser.add_argument('--nthread',type=int,default=1,
			help='number of threads to parallel processing, default 1')
	parser.add_argument('--version', action='version',version='%(prog)s 1.0')
	# parse arguments
	args = parser.parse_args()
	
	###CCD parameters
	saturate=30000.0
	gain=1.375
	rdnoise=8.0
	## detecting and analysis
	detect_nsigma=1.0
	aly_nsigma=1.5
	conv_sigma=1.2 # not using seeing FWHM/2.35: if the seeing is too large many faint objects cannot be found, so we use 1.2 for all Bok images
	remove_nthresh=64
	sky_grid=216
	# photometry
	apertures=[3,4,5,6,8,10,13,16,20,25,30,40]
	sky_annuli=[40,20]
	sdssmag_limit=23.5
	edge_nfwhm=3.0 # n*fwhm of seeing to regarded as edge pixels
	min_dist=3.3 # min distance between scuss and sdss pixels
	ab_limit=[1.0,7.0]
	re_limit=[1e-10,5.0]
	nmax_fit=7
	nmax_force_fit=10
	nradius=10 # n*effective radius to create models and residual map
	remove_big_width=500 # remove big objects

	# get list of fits, psfs, flags or weights
	fitslist=args.fits
	flaglist=args.flag
	weightlist=args.weight
	psflist=args.psfex
	
	nfits=len(fitslist)
	nflag=len(flaglist)
	nweight=len(weightlist)
	npsf=len(psflist)
	
	# print help if no parameters provided
	if len(args.fits) == 0:
		parser.print_help()
	# verify whether file numbers are equal
	if npsf !=0 and npsf != nfits:
		print('The number of PSF files should be same as the FITS image')
		sys.exit(-1)
	stacked=args.stacked
	if stacked:
		if nweight !=0 and nfits != nweight:
			print('The number of weight images should be same as the Stacked images')
			sys.exit(-1)
	else:
		if nflag !=0 and nflag != nfits:
			print('The number of flag images should be same as the single-epoch images')
			sys.exit(-1)
	
	print(datetime.datetime.now())
	for i,ifits in enumerate(fitslist):
		start=time.time()
		print('phot '+ifits+' ...')
		if not os.path.isfile(ifits):
			continue
		# get FITS image
		fitsname,_=os.path.splitext(ifits)
		fitspath,name=os.path.split(ifits)
		data=fits.getdata(ifits)
		head=fits.getheader(ifits)
		if 'CCD_NO:' in head:
			head.rename_keyword('CCD_NO:','CCD_NO')
		filt=head['filter']
		if filt == 'bokr': filt='r'
		if not stacked:
			datestr=head['date-obs']
			timestr=head['time-obs']
			tt=Time(datestr+' '+timestr,format='iso',scale='utc')
			mjd=tt.mjd
		seeing = None
		if 'seeing' in head:
			seeing=head['seeing']/0.455 # seeing in pixels
		edge_pixels=np.ceil(edge_nfwhm*seeing).astype('int')
	
		#get weight, flag, mask
		mask_sat=data > saturate
		if stacked:
			if nweight ==0:
				weight=fits.getdata(os.path.join(fitspath,'b_'+name))
			else:
				weight=fits.getdata(weightlist[i])
			if mask_sat.any():
				weight[mask_sat]+=1
			flag=(weight == 0).astype('int8')
			mask=flag.copy()
			tmpphot=pyphot.phot.PyPhot(weight,mask=mask)
			tmpphot.get_sky_single(method="median",nsample=10000,sigma=3.0,iters=0,show=False)
			wmed=tmpphot.sky
			gain=gain*wmed
			rdnoise=rdnoise/np.sqrt(wmed)
			print('weight file: '+weightlist[i])
		else:
			if nflag ==0:
				ccd=str(head['CCD_NO'])
				date=head['date-obs']
				flagfile=get_flagfile(date,ccd)
				flag=fits.getdata(flagfile)
			else:
				flag=fits.getdata(flaglist[i])
			flag=flag != 0
			mask=flag.copy()
			weight=None
			print('flag file: '+flagfile)

		flag[mask_sat]=np.bitwise_or(flag[mask_sat],2)
		if edge_pixels > 0:
			flag[:edge_pixels,:]=np.bitwise_or(flag[:edge_pixels,:],4)
			flag[-edge_pixels:,:]=np.bitwise_or(flag[-edge_pixels:,:],4)
			flag[:,:edge_pixels]=np.bitwise_or(flag[:,:edge_pixels],4)
			flag[:,-edge_pixels:]=np.bitwise_or(flag[:,-edge_pixels:],4)

		psffile=fitsname+'.psf'
		# get psf file
		if npsf == 0:
			if not os.path.isfile(psffile):
				get_psfex(ifits,stacked=stacked)
		else:
			if os.path.isfile(psflist[i]):
				psffile=psflist[i]
			else:
				get_psfex(ifits,stacked=stacked)
		
		# get sdss catalog
		sdsscat=fitsname+'-sdss.fits'
		#if args.add_sdsscat or args.force_sdss or (not args.add_sdsscat and not args.force_sdss and args.phot_sdss) :
		ra,dec=fits_center(head,stacked=stacked)
		ra=ra[0] ; dec=dec[0]
		print('fits center: {}, {}'.format(ra,dec))
		if not os.path.isfile(sdsscat):
			d=DownSDSSCat()
			d.get_allmag_cat(ra=ra,dec=dec,width=0.6,psfmag_limit=sdssmag_limit,filename=sdsscat,fmt='fits')
		if os.path.isfile(sdsscat):
			print('sdss catalog: '+sdsscat)
		else:
			print('sdss catalog is not found!')
		# processing sdss catalogs
		sdss_params=None
		if os.path.isfile(sdsscat) and os.path.getsize(sdsscat)!=0:
			sdss=fits.getdata(sdsscat,1)
			sdss_params,sdss=params_from_sdss(sdss,head,force_filter=args.force_filter)

		# get psfex PSF, initialize photometry
		psf=pyphot.psfex.PSFEx()
		psf.from_fits(psffile)
		phot=pyphot.phot.PyPhot(data,uncertainty=None,mask=mask,flags=flag,weight=weight,saturate=saturate,gain=gain,rdnoise=rdnoise,seeing=seeing,nthread=args.nthread)

		if os.path.isfile(fitsname+'-sky.fits') and os.path.isfile(fitsname+'-sky_rms.fits'):
			print('get sky and rms map from saved fits')
			phot.sky=fits.getdata(fitsname+'-sky.fits')
			phot.sky_rms=fits.getdata(fitsname+'-sky_rms.fits')
		else:
			phot.get_sky_map(nsample=30000,grid_size=sky_grid)
		if stacked:
			#phot.get_sky_single(nsample=100000)
			phot.weighted_gain_rdnoise()
		#else:
		#	phot.get_sky_map(nsample=30000,grid_size=216)
		# get flux error map	
		if seeing is None:
			phot.get_seeing()
		phot.update_data_uncertainty(poisson=True)
		if args.save_sky:
			print('save sky and rms maps')
			fits.writeto(fitsname+'-sky.fits',phot.sky,header=head,clobber=True,output_verify='fix+warn')
			fits.writeto(fitsname+'-sky_rms.fits',phot.sky_rms,header=head,clobber=True,output_verify='fix+warn')
		
		# detecting source in bok image
		tmp_prop=phot.detect_objects(detect_nsigma=detect_nsigma,conv_sigma=conv_sigma,analysis_nsigma=aly_nsigma,remove_nthresh=remove_nthresh,sig_flag=False,remove_after_segment=True,remove_big_width=remove_big_width)
		coord_scuss=np.array((tmp_prop['x_window'],tmp_prop['y_window'])).transpose()
		nscuss=len(coord_scuss)

		# get SDSS coordinates if also fitting them
		coord=coord_scuss
		if sdss_params is not None:
			coord_sdss=np.array((sdss_params['x'],sdss_params['y'])).transpose()
			index_scuss,index_sdss=closest_match(coord_scuss,coord_sdss,min_dist=min_dist)
			index_scuss=np.array(index_scuss)
			indsdss_match=np.array([idx[0] for idx in index_sdss])
			indsdss_nomatch=np.setdiff1d(np.arange(len(coord_sdss)),indsdss_match)
			nuniq_sdss=len(indsdss_nomatch)
			if args.phot_sdss and nuniq_sdss > 0:
				coord=np.vstack((coord_scuss,coord_sdss[indsdss_nomatch]))
			is_sdss=Table.Column(np.zeros(len(coord)).astype('int'),'is_sdss')
			is_sdss[index_scuss]=1
			if nuniq_sdss > 0:
				is_sdss[nscuss:]=1
		else:
			is_sdss=Table.Column(np.zeros(len(coord)).astype('int'),'is_sdss')
		# calculate MJD for single-epoch images
		if not stacked:
			mjd_col=Table.Column(np.ones(len(coord))*mjd,'bok_mjd')
		
		# Self-photometry with either Bok objects or together with SDSS objects
		print('Photometry with shape parameters of the image itself ...')
		if sdss_params is not None and args.phot_sdss:
			det_sc=phot.detect_objects(detect_nsigma=detect_nsigma,conv_sigma=conv_sigma,analysis_nsigma=aly_nsigma,remove_nthresh=remove_nthresh,sig_flag=False,remove_after_segment=True,coordinates=coord,remove_big_width=remove_big_width)
		else:
			det_sc=tmp_prop
		if det_sc == False:
			continue
		if args.save_segment:
			fits.writeto(fitsname+'-segment.fits',phot.seglabels,header=head,clobber=True,output_verify='fix+warn')

		# self model photometry
		param_det=phot.parameter_guess(det_sc,xname='x_window',yname='y_window',remove_seeing=False)
		mod_sc=phot.model_photometry(psf,param_guess=param_det,pos_limit=True,ab_limit=ab_limit,re_limit=re_limit,nmax_fit=nmax_fit,remove_seeing=False)
		if args.save_model:
			model,residual=phot.create_model_residual(mod_sc,psf,nradius=nradius,add_seeing=False)
			fits.writeto(fitsname+'-model.fits',model,header=head,clobber=True,output_verify='fix+warn')
			fits.writeto(fitsname+'-residual.fits',residual,header=head,clobber=True,output_verify='fix+warn')
		#mod_sc_bak=mod_sc.copy()
			
		det_sc=Table(det_sc)
		mod_sc=Table(mod_sc)
		rename_table(det_sc,prefix='bok_',postfix='_'+filt)
		rename_table(mod_sc,prefix='bok_',postfix='_'+filt)
		del_cols=['x_min','x_max','y_min','y_max','cxx','cxx_error','cyy','cyy_error','cxy','cxy_error']
		for col in del_cols:
			det_sc.remove_column('bok_'+col+'_'+filt)
		del_cols=['id','x_in','y_in','x_psf','x_psf_error','y_psf','y_psf_error','x_dev','x_dev_error','y_dev','y_dev_error','x_exp','x_exp_error','y_exp','y_exp_error']
		for col in del_cols:
			mod_sc.remove_column('bok_'+col+'_'+filt)
		fluxadu=det_sc
		fluxadu.add_columns(mod_sc.columns.values())
		index_det=det_sc['bok_id_'+filt]
		if not stacked:
			fluxadu.add_column(mjd_col[index_det])
		fluxadu.add_column(is_sdss[index_det])
		

		# add SDSS information
		bok_id='bok_id_'+filt
		if sdss_params is not None:
			sdss=Table(sdss)
			rename_table(sdss,prefix='sdss_')
			sdss.add_columns([table.Column(sdss_params['x'],name='sdss_bokx_'+filt), 
				table.Column(sdss_params['y'],name='sdss_boky_'+filt)],indexes=[0,0])
			sdss[bok_id]=-1
			index1=np.array([ind for ind in np.arange(len(indsdss_match)) if index_scuss[ind] in index_det])
			if len(index1) > 0:
				sdss[bok_id][indsdss_match[index1]]=index_scuss[index1]
			if nuniq_sdss > 0:
				if args.phot_sdss:
					index2=np.array([ind for ind in np.arange(nuniq_sdss) if ind+nscuss in index_det])
					if len(index2)>0:
						sdss[bok_id][indsdss_nomatch[index2]]=index2+nscuss 
				else:
					for ii in range(nuniq_sdss): fluxadu.add_row()
					nscuss_det=len(index_det)
					fluxadu[bok_id][nscuss_det:]=nscuss+np.arange(nuniq_sdss)
					fluxadu['is_sdss'][nscuss_det:]=1
			fluxadu=table.join(fluxadu,sdss,join_type='left')
			fluxadu=fluxadu.filled(fill_value=0)


		# force photometry
		if sdss_params is not None and args.force_sdss:
			print('Force Photometry with SDSS shape parameters ...')
			det_sd=phot.detect_objects(detect_nsigma=detect_nsigma,conv_sigma=conv_sigma,analysis_nsigma=aly_nsigma,remove_nthresh=remove_nthresh,sig_flag=False,remove_after_segment=True,coordinates=coord_sdss,only_position=True,remove_big_width=remove_big_width)
			if det_sd is not False:
				sdss_params=sdss_params[det_sd['id']]
				sdss_params['x']=det_sd['x_window']
				sdss_params['y']=det_sd['y_window']
				mod_sd=phot.model_photometry(psf,param_guess=sdss_params,pos_limit=True,ab_limit=True,re_limit=True,theta_limit=True,nmax_fit=nmax_force_fit,remove_seeing=True,models=['psf','dev','exp'])
				if args.save_model:
					fmodel,fresidual=phot.create_model_residual(mod_sd,psf,nradius=nradius,add_seeing=True)
					fits.writeto(fitsname+'-fmodel.fits',fmodel,clobber=True,output_verify='fix+warn')
					fits.writeto(fitsname+'-fresidual.fits',fresidual,clobber=True,output_verify='fix+warn')
				mod_sd=Table(mod_sd)
				rename_table(mod_sd,prefix='bokf_',postfix='_'+filt)
				del_cols=['x_psf','x_psf_error','y_psf','y_psf_error','x_dev','x_dev_error','y_dev','y_dev_error','re_dev','re_dev_error','ab_dev','ab_dev_error','theta_dev','theta_dev_error','x_exp','x_exp_error','y_exp','y_exp_error','re_exp','re_exp_error','ab_exp','ab_exp_error','theta_exp','theta_exp_error']
				for col in del_cols:
					mod_sd.remove_column('bokf_'+col+'_'+filt)
				mod_sd.rename_column('bokf_x_in_'+filt,'bokf_x_window_'+filt)
				mod_sd.rename_column('bokf_y_in_'+filt,'bokf_y_window_'+filt)
				mod_sd.rename_column('bokf_id_'+filt,bok_id)
				mod_sd[bok_id]=sdss[bok_id][det_sd['id']]
				fluxadu=table.join(fluxadu,mod_sd,join_type='left')
				fluxadu=fluxadu.filled(fill_value=0)

		# get coordinates for aperture photometry
		aper_coord=np.array([fluxadu['bok_x_window_'+filt],fluxadu['bok_y_window_'+filt]]).transpose()
		if sdss_params is not None and not args.phot_sdss and nuniq_sdss>0:
			aper_coord[nscuss_det:,0]=fluxadu['sdss_bokx_'+filt][nscuss_det:]
			aper_coord[nscuss_det:,1]=fluxadu['sdss_boky_'+filt][nscuss_det:]
			if args.force_sdss:
				index=np.array([ind for ind in sdss[bok_id][det_sd['id']] if ind >= nscuss])
				if len(index) > 0:
					aper_coord[index,0]=fluxadu['bokf_x_window_'+filt][index]
					aper_coord[index,1]=fluxadu['bokf_y_window_'+filt][index]

		print('Aperture Photometry ....')
		aper_prop=phot.aperture_photometry(aper_coord,apertures=apertures,sky_annulus=sky_annuli,aper_type='circle',sky_type='global')
		aper_prop=Table(aper_prop)
		rename_table(aper_prop,prefix='bok_',postfix='_'+filt)
		del_cols=['id']
		for col in del_cols:
			aper_prop.remove_column('bok_'+col+'_'+filt)
		aper_prop.rename_column('bok_x_in_'+filt,'bok_x_aper_'+filt)
		aper_prop.rename_column('bok_y_in_'+filt,'bok_y_aper_'+filt)
		fluxadu.add_columns(aper_prop.columns.values())
		#re-order the objects
		fluxadu[bok_id]=np.arange(len(fluxadu))

		fluxfile=fitsname+'-fluxadu.fits'
		fits.writeto(fluxfile,fluxadu.as_array(),clobber=True,output_verify='fix+warn')
		ind_head=head.index('RA-OBS')
		f=fits.open(fluxfile,mode='update')
		f[1].header["apersize"]=('3,4,5,6,8,10,13,16,20,25,30,40','aperture radii in pixels')
		f[1].header.extend(head.cards[ind_head:],bottom=True)
		f.flush(output_verify='fix+warn')
		f.close()

		# calibrate flux and aperture corrections
		fluxcalib=calibrate_fluxadu(fluxadu,head,stacked=stacked)
		if fluxcalib is False:
			continue
		plotname=None
		if args.plot:
			plotname=fitsname
		fluxcor=magnitude_correction(fluxcalib,head,plot_name=plotname)
		corfile=fitsname+'-fluxcor.fits'
		fits.writeto(corfile,fluxcor.as_array(),clobber=True,output_verify='fix+warn')
		f=fits.open(corfile,mode='update')
		f[1].header.extend(head.cards[ind_head:],bottom=True)
		f.flush(output_verify='fix+warn')
		f.close()

		print('Total processing time for '+fitsname,time.time()-start)
	print(datetime.datetime.now())
Beispiel #29
0
        'panstarrs_gMAG': 'F334', 'panstarrs_rMAG': 'F335', 'panstarrs_iMAG': 'F336', 'panstarrs_zMAG': 'F337', 'panstarrs_yMAG': 'F338'}

filt_type = ['narrowband', 'narrowband', 'narrowband', 'narrowband', 'narrowband', 'narrowband', 'narrowband', 'narrowband', 'narrowband', 'narrowband', 'narrowband', 'narrowband', 'narrowband', 'narrowband', 'narrowband', 'narrowband', 'narrowband', 'narrowband', \
            'sdss', 'sdss', 'sdss', 'sdss', 'sdss', \
            'allwise', 'allwise', \
            'panstarrs', 'panstarrs', 'panstarrs', 'panstarrs', 'panstarrs', \
            'clash','clash','clash','clash','clash','clash','clash','clash']

filt_lam = [4920, 5000, 5100, 5200, 5320, 5400, 5500, 5600, 5680, 5800, 5890, 6000, 6100, 6200, 6300, 6400, 6500, 6600, \
        3543, 4770, 6231, 7625, 9134, \
        33682, 46179, \
        4849.11, 6201.19, 7534.96, 8674.18, 9627.77, \
        3817.2, 4448, 5470.2, 6505.4, 7960.5, 7671.2, 9028.2, 21574]

filt_table = Table()
filt_table.add_columns([filt_name, filt_lam, filt_type],
                       names=['name', 'lam', 'type'])

# # Find where magnitudes end
# imagerr_end = np.where(np.array(cat.colnames) == 'filter_count')[0][0]
# fnames_original = cat.colnames[2:imagerr_end:2]
# # Translate original filter names from gal_table to eazy filter names using dictionary
# fnames = [filt_Dict[fnames_original[x]] for x in range(len(fnames_original))]
# hfilt = np.in1d(filt_table['name'], fnames)
# filt_table = filt_table[hfilt]

fnames = cat.colnames[4:-4:2]
# print(fnames)
hfilt = np.in1d(filt_table['name'], fnames)
filt_table = filt_table[hfilt]
# print(filt_table)
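
# Illustrative sketch (not in the original): the trimmed filt_table can be used as a
# lookup from catalogue filter name to central wavelength (units as given in filt_lam).
# 'F334' below is only an example key and may not be present for a given catalogue.
lam_lookup = dict(zip(filt_table['name'], filt_table['lam']))
# lam_lookup.get('F334')  -> central wavelength of that filter, or None if not matched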
Beispiel #30
0
def read_spectrum(filepath, filesource='auto'):
    """read SDSS/LAMOST spectrum

    Parameters
    ----------

    filepath: string
        input file path

    filesource: string
        {'auto' / 'sdss_dr7' / 'sdss_dr10' / 'sdss_dr12' /
         'lamost_dr1' / 'lamost_dr2' / 'lamost_dr3'}

    Returns
    -------

    specdata: astropy.table.Table
        spectra as a table

    """
    # auto-identify the origin of the spectrum
    if filesource == 'auto':
        telescope = fits.open(filepath)[0].header['TELESCOP']
        if telescope == 'SDSS 2.5-M':
            return read_spectrum(filepath, filesource='sdss_dr12')
        if telescope == 'LAMOST':
            return read_spectrum(filepath, filesource='lamost_dr3')

    # SDSS DR7 spectrum
    if filesource == 'sdss_dr7':
        hdulist = fits.open(filepath)

        # 1. flux, flux_err, mask
        data = hdulist[0].data  # 5 rows
        flux = data[0][:]
        flux_err = data[2][:]
        mask = data[3][:]

        # 2. wave
        # http://iraf.net/irafdocs/specwcs.php
        # wi = CRVALi + CDi_i * (li - CRPIXi)
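        # SDSS DR7 spectra store log10(wavelength) on a linear pixel grid, so the IRAF
        # relation above is evaluated with 1-based pixel indices and then exponentiated.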
        CRVAL1 = hdulist[0].header['CRVAL1']
        CD1_1 = hdulist[0].header['CD1_1']
        CRPIX1 = hdulist[0].header['CRPIX1']
        Npix = len(flux)
        wavelog = CRVAL1 + (np.arange(Npix) + 1 - CRPIX1) * CD1_1
        wave = np.power(10, wavelog)

        spec = Table([wave, flux, flux_err, mask],
                     names=['wave', 'flux', 'flux_err', 'mask'])
        return spec

    # SDSS DR10/DR12 spectrum
    if filesource == 'sdss_dr10' or filesource == 'sdss_dr12':
        data = fits.open(filepath)
        specdata = Table(data[1].data)
        wave = Column(name='wave', data=np.power(10., specdata['loglam']))
        flux_err = Column(name='flux_err', data=(specdata['ivar']) ** -0.5)
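        # note: pixels with ivar == 0 give infinite flux_err here; unlike the LAMOST branch
        # below, this branch does not convert those values to NaN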
        specdata.add_columns([wave, flux_err])
        return specdata

    # LAMOST DR2/DR3 spectrum
    if filesource == 'lamost_dr3' or filesource == 'lamost_dr2' or filesource == 'lamost_dr1':
        data = fits.open(filepath)
        specdata = Table(data[0].data.T)
        flux = Column(name='flux', data=specdata['col0'])
        ivar = Column(name='ivar', data=specdata['col1'])
        flux_err = Column(name='flux_err', data=(specdata['col1']) ** -0.5)
        wave = Column(name='wave', data=specdata['col2'])
        and_mask = Column(name='and_mask', data=specdata['col3'])
        or_mask = Column(name='or_mask', data=specdata['col4'])
        # for flux_err, convert inf to nan
        flux_err[np.isinf(flux_err.data)] = np.nan
        return Table([wave, flux, flux_err, ivar, and_mask, or_mask])

    return None
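
# Minimal usage sketch (not part of the original snippet). The file name below is
# hypothetical, and filesource='auto' relies on the TELESCOP header keyword being present.
def _example_read_spectrum(path='spec-1234-56789-0123.fits'):
    spec = read_spectrum(path, filesource='auto')
    if spec is not None:
        print(spec.colnames)  # e.g. ['wave', 'flux', 'flux_err', ...] depending on the source
    return spec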
    def join_cats(self, no_conv, check_phot):
        """ """
        if no_conv:
            images = self.par_info['image']
        else:
            images = self.par_info['convim']

        # read in F110W catalog (all UVIS fields have F110W)
        f110cat = os.path.join(self.outdir, 'F110W_cat.fits')
        f = fits.open(f110cat)
        uvisdata = f[1].data
        f.close()
        # get all other catalogs
        cats = glob(os.path.join(self.outdir, 'F*cat.fits'))
        cats = [x for x in cats if x != f110cat]

        # read in original catalog
        c = os.path.join(self.datadir,self.par,'DATA/DIRECT_GRISM/fin_F110.cat')
        refdata = np.genfromtxt(c)
        idx,separc = match_cats(uvisdata['x_world'], uvisdata['y_world'], 
                                refdata[:,7], refdata[:,8])
        match = (separc.value*3600. <= 0.2)
        # d['x_world'][match]
        # dref[:,7][idx[match]]
        t = Table(uvisdata[match])  
     
        print
        for phot in ['ISO', 'AUTO', 'APER']: # 'PETRO'
            # fix errors
            w110 = np.where(self.par_info['filt'] == 'F110W')
            print 'fix errors for F110W, %s' % phot
            eflux,emag = calc_errors(t, images[w110][0], 
                                self.par_info['rms'][w110][0],
                                os.path.join(self.outdir,'F110W_seg.fits'), 
                                self.par_info['exptime'][w110][0], phot=phot)

            #eflux=eflux.reshape(t['FLUXERR_%s'%phot].shape)
            t['FLUXERR_%s'%phot] = eflux
            t['MAGERR_%s'%phot] = emag

            # rename F110 photometry columns
            t.rename_column('FLUX_%s'%phot, 'FLUX_%s_F110W'%phot)
            t.rename_column('FLUXERR_%s'%phot, 'FLUXERR_%s_F110W'%phot)
            t.rename_column('MAG_%s'%phot, 'MAG_%s_F110W'%phot)
            t.rename_column('MAGERR_%s'%phot, 'MAGERR_%s_F110W'%phot)
            
        # add column of indices from fin_F110.cat 
        t.add_column(Column(data=refdata[:,1][idx[match]], name='WISP_NUMBER'),
                     index=0)
        
        # add in photometry from other catalogs
        for i, cat in enumerate(cats):
            i += 1
            print cat
            f = fits.getdata(cat)
            d = f[match]
            filtstr = os.path.basename(cat).split('_')[0]
            for j,phot in enumerate(['ISO', 'AUTO', 'APER']):
                index = (i*12 + 2) + (j * 4)

                # fix errors - UVIS errors are fine
                if filtstr == 'F160W':
                    w160 = np.where(self.par_info['filt'] == 'F160W')
                    print 'fix errors for %s, %s' % (filtstr, phot)
                    eflux,emag = calc_errors(d, images[w160][0],
                                    self.par_info['rms'][w160][0],
                                    os.path.join(self.outdir,'F110W_seg.fits'), 
                                    self.par_info['exptime'][w160][0], 
                                    phot=phot)
                    d['FLUXERR_%s'%phot] = eflux
                    d['MAGERR_%s'%phot] = emag

                t.add_columns([Column(data=d['FLUX_%s'%phot], 
                                  name='FLUX_%s_%s'%(phot,filtstr)),
                               Column(data=d['FLUXERR_%s'%phot],
                                  name='FLUXERR_%s_%s'%(phot,filtstr)),
                               Column(data=d['MAG_%s'%phot],
                                  name='MAG_%s_%s'%(phot,filtstr)),
                               Column(data=d['MAGERR_%s'%phot], 
                                  name='MAGERR_%s_%s'%(phot,filtstr))], 
                               indexes=[index, index, index, index])

        # sort by WISP number
        t.sort(['WISP_NUMBER'])
        output = os.path.join(self.outdir, '%s_cat.fits'%self.par)
        t.write(output, format='fits')
        """
        hdu0 = fits.PrimaryHDU()
        hdu1 = fits.BinTableHDU(np.array(t))
        hdulist = fits.HDUList([hdu0, hdu1])
        output = os.path.join(self.outdir, '%s_cat.fits'%self.par)
        hdulist.writeto(output, clobber=True)
        """

        # make region file
        region_wcs(os.path.splitext(output)[0]+'.reg', 
                   t['X_WORLD'], t['Y_WORLD'], t['A_WORLD'], t['B_WORLD'],
                   t['THETA_WORLD'], t['NUMBER'])
        # make region file of original F110W catalog
        region_wcs(os.path.join(self.outdir, 'F110W_orig.reg'), 
                   refdata[:,7], refdata[:,8], refdata[:,9], refdata[:,10],
                   refdata[:,11], refdata[:,1], color='red')

        if check_phot is True:
            print c
            print f110cat
            check_conv_phot(c, output, 'F110W')
            c = os.path.join(self.datadir,self.par,'DATA/DIRECT_GRISM/fin_F160.cat')
            print c
            check_conv_phot(c, output, 'F160W')
Beispiel #32
0
def main():
    parser = argparse.ArgumentParser(description="Returns photometric tables of catalog stars")

    parser.add_argument('catalog',type=str,help='JSON catalog of sources.')
    parser.add_argument('-WISE',type=str,required=True,help='Directory of WISE tables')
    parser.add_argument('-2MASS',type=str,dest='MASS',required=True,help='Directory of 2MASS tables')

    args = parser.parse_args()

    # Get starlist
    print 'Loading JSON data from: %s' % args.catalog
    theList = Star.load(args.catalog)
    print '\tLoaded %i sources.' % len(theList)


    print
    print 'Getting photometry from catalogs...'
    t = star_photometry(theList)
    
    print 'Locating sources in %s' % args.MASS
    t = add_2MASS(t,theList,args.MASS)

    print 'Locating sources in %s' % args.WISE
    t = add_WISE(t,theList,args.WISE)
    
    print

    #t.write('photometry.tsv',format='ascii.tab')
    #exit()

    outfile = 'photometry_ZOMG'

    colnames = [x for x in t.colnames if 'lam' in x]
    for col in colnames:
        t[col] = [99.99 if ((x is None) or (x == 'None')) else x for x in t[col]]

    #for Roberta
    rTable = Table()
    rTable.add_columns([t[x] for x in ['ID','Gal']])
    for col in ['U','B','V','R','I','J','H','K',
                '3.6','4.5','5.8','8.0',
                'W1','W2','W3','W4',
                'F_U_Jy','F_B_Jy','F_V_Jy','F_R_Jy','F_I_Jy',
                'F_J_Jy','F_H_Jy','F_K_Jy',
                'F_3.6_Jy','F_4.5_Jy','F_5.8_Jy','F_8.0_Jy',
                'F_W1_Jy','F_W2_Jy','F_W3_Jy','F_W4_Jy',
                'F_0.36_um','lam_F_0.36_um','F_0.44_um','lam_F_0.44_um','F_0.55_um','lam_F_0.55_um','F_0.71_um','lam_F_0.71_um','F_0.97_um','lam_F_0.97_um',
                'F_1.24_um','lam_F_1.24_um','F_1.66_um','lam_F_1.66_um','F_2.16_um','lam_F_2.16_um',
                'F_3.55_um','lam_F_3.55_um','F_4.44_um','lam_F_4.44_um','F_5.73_um','lam_F_5.73_um','F_7.87_um','lam_F_7.87_um',
            'F_3.35_um','lam_F_3.35_um','F_4.60_um','lam_F_4.60_um','F_11.56_um','lam_F_11.56_um','F_22.09_um','lam_F_22.09_um']:
        c = Column([np.float(x) if x else 99.99 for x in t[col]],name=col,dtype=np.float)
        rTable.add_column(c)

    rTable = photo_corr(rTable)

    rTable.write(outfile+'.tsv',format='ascii.tab')
    rTable.write(outfile+'.fits')
    exit()
    
        
    newCols = []
    for col in colnames:
        c = Column([np.float(x) for x in t[col]],name=col,dtype=np.float)
        newCols.append(c)
    #print colnames
    eTable = Table()
    eTable.add_column(t['ID'])
    #eTable.add_column(t['lam_F_0.55_um'])
    #c = Column([str(x) for x in t['ID']],name='ID',dtype=str)
    #eTable.add_column(c)
    #eTable.add_columns([t[x] for x in colnames])
    eTable.add_columns(newCols)

    #print eTable['ID'].dtype
    #exit()
               
    #print eTable.colnames
    #for col in eTable.colnames:
    #    print eTable[col]

    #print eTable
    outfile = 'photometry.fits'
    print 'Writing table to %s' % outfile
    eTable.write(outfile)
Beispiel #33
0
"""
Generate input file for single sersic model
"""

import numpy as np
from astropy.table import Table, Column
from astropy.io import fits

source_table = '../../testser/RAWFIT00000.00268.fits'
outname = 'input_ser.fits'

source = Table.read(source_table)

t_input = Table()
t_input.add_column(source['NAME'])

# put best-fit params of test Sersic fit as initial values
val = Column(
        name='SER_VAL',
        data=source['FIT_SER'].data)

fix = Column(
        name='SER_FIX',
        data=np.zeros((len(source), 8))
        )
fix[:, 4] = 1.  # do not fit shape of the isophotes
t_input.add_columns([val, fix])
t_input.write(outname)
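
# Quick sanity check (illustrative, not part of the original): read the file back and
# confirm that column 4 of SER_FIX (the isophote-shape parameter) is held fixed everywhere.
check = Table.read(outname)
assert check['SER_FIX'].shape == (len(source), 8)
assert (check['SER_FIX'][:, 4] == 1.).all()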

Beispiel #34
0
def make_sensitivity_curves(tint=1200, spec_res=100, sq_aper_diam=0.30, seeing_limited=False):
    mag = np.arange(10, 22)
    snr_z = np.zeros(len(mag), dtype=float)
    snr_y = np.zeros(len(mag), dtype=float)
    snr_j = np.zeros(len(mag), dtype=float)
    snr_h = np.zeros(len(mag), dtype=float)
    snr_sum_z = np.zeros(len(mag), dtype=float)
    snr_sum_y = np.zeros(len(mag), dtype=float)
    snr_sum_j = np.zeros(len(mag), dtype=float)
    snr_sum_h = np.zeros(len(mag), dtype=float)
    bkg_z = np.zeros(len(mag), dtype=float)
    bkg_y = np.zeros(len(mag), dtype=float)
    bkg_j = np.zeros(len(mag), dtype=float)
    bkg_h = np.zeros(len(mag), dtype=float)
    star_z = np.zeros(len(mag), dtype=float)
    star_y = np.zeros(len(mag), dtype=float)
    star_j = np.zeros(len(mag), dtype=float)
    star_h = np.zeros(len(mag), dtype=float)

    spec_z_tab = None
    spec_y_tab = None
    spec_j_tab = None
    spec_h_tab = None

    # Calculate the number of supernovae.
    N_SNe = 4500.0 * 0.6 * 10**(mag - 18.9)

    out_file = 'roboAO_sensitivity_t{0:d}_R{1:d}_ap{2:0.3f}'.format(tint, spec_res,
                                                                    sq_aper_diam)
    
    # Save the output to a table.
    _out = open(out_file + '.txt', 'w')

    meta1 = '# tint = {0:5d}, R = {1:5d}, sq_ap_diam = {2:5.3f}"\n'
    _out.write(meta1.format(tint, spec_res, sq_aper_diam))
    _out.write('# Sensitivity integrated over broad band.\n')
    
    hdr = '{0:5s}  {1:6s}  {2:5s}  {3:5s}  {4:5s}  {5:5s}  {6:5s}  {7:5s}\n'
    fmt = '{0:5.1f}  {1:6.1f}  {2:5.1f}  {3:5.1f}  {4:5.1f}  {5:5.1f}  {6:5.1f}  {7:5.1f}\n'
    _out.write(hdr.format('# Mag', 'N_SNe', 'J_SNR', 'H_SNR', 'J_ms', 'H_ms', 'J_mb', 'H_mb'))
               
    for mm in range(len(mag)):
        print 'Mag: ', mag[mm]
        blah_z = etc_uh_roboAO(mag[mm], 'Z', tint,
                               spec_res=spec_res, sq_aper_diam=sq_aper_diam, 
                               seeing_limited=seeing_limited)
        blah_y = etc_uh_roboAO(mag[mm], 'Y', tint,
                               spec_res=spec_res, sq_aper_diam=sq_aper_diam, 
                               seeing_limited=seeing_limited)
        blah_j = etc_uh_roboAO(mag[mm], 'J', tint,
                               spec_res=spec_res, sq_aper_diam=sq_aper_diam,
                               seeing_limited=seeing_limited)
        blah_h = etc_uh_roboAO(mag[mm], 'H', tint,
                               spec_res=spec_res, sq_aper_diam=sq_aper_diam,
                               seeing_limited=seeing_limited)
        
        col_z_suffix = '_Z_{0:d}'.format(mag[mm])
        col_y_suffix = '_Y_{0:d}'.format(mag[mm])
        col_j_suffix = '_J_{0:d}'.format(mag[mm])
        col_h_suffix = '_H_{0:d}'.format(mag[mm])

        spec_signal_z = Column(name='sig'+col_z_suffix, data=blah_z[4])
        spec_signal_y = Column(name='sig'+col_y_suffix, data=blah_y[4])
        spec_signal_j = Column(name='sig'+col_j_suffix, data=blah_j[4])
        spec_signal_h = Column(name='sig'+col_h_suffix, data=blah_h[4])
        spec_bkg_z = Column(name='bkg'+col_z_suffix, data=blah_z[5])
        spec_bkg_y = Column(name='bkg'+col_y_suffix, data=blah_y[5])
        spec_bkg_j = Column(name='bkg'+col_j_suffix, data=blah_j[5])
        spec_bkg_h = Column(name='bkg'+col_h_suffix, data=blah_h[5])
        spec_snr_z = Column(name='snr'+col_z_suffix, data=blah_z[6])
        spec_snr_y = Column(name='snr'+col_y_suffix, data=blah_y[6])
        spec_snr_j = Column(name='snr'+col_j_suffix, data=blah_j[6])
        spec_snr_h = Column(name='snr'+col_h_suffix, data=blah_h[6])

        
        if spec_z_tab is None:
            spec_z_tab = Table([blah_z[3]], names=['wave_Z'])
        if spec_y_tab is None:
            spec_y_tab = Table([blah_y[3]], names=['wave_Y'])
        if spec_j_tab is None:
            spec_j_tab = Table([blah_j[3]], names=['wave_J'])
        if spec_h_tab is None:
            spec_h_tab = Table([blah_h[3]], names=['wave_H'])

        spec_z_tab.add_columns([spec_signal_z, spec_bkg_z, spec_snr_z])
        spec_y_tab.add_columns([spec_signal_y, spec_bkg_y, spec_snr_y])
        spec_j_tab.add_columns([spec_signal_j, spec_bkg_j, spec_snr_j])
        spec_h_tab.add_columns([spec_signal_h, spec_bkg_h, spec_snr_h])

        snr_z[mm]  = blah_z[0]
        snr_y[mm]  = blah_y[0]
        snr_j[mm]  = blah_j[0]
        snr_h[mm]  = blah_h[0]
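        # band-integrated SNR: per-channel SNR values combined in quadrature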
        snr_sum_z[mm] = math.sqrt((spec_snr_z**2).sum())
        snr_sum_y[mm] = math.sqrt((spec_snr_y**2).sum())
        snr_sum_j[mm] = math.sqrt((spec_snr_j**2).sum())
        snr_sum_h[mm] = math.sqrt((spec_snr_h**2).sum())

        star_z[mm]  = blah_z[1]
        star_y[mm]  = blah_y[1]
        star_j[mm]  = blah_j[1]
        star_h[mm]  = blah_h[1]
        bkg_z[mm]  = blah_z[2]
        bkg_y[mm]  = blah_y[2]
        bkg_j[mm]  = blah_j[2]
        bkg_h[mm]  = blah_h[2]

    avg_tab = Table([mag, snr_z, snr_y, snr_j, snr_h, 
                     snr_sum_z, snr_sum_y, snr_sum_j, snr_sum_h,
                     star_z, star_y, star_j, star_h, bkg_z, bkg_y, bkg_j, bkg_h],
                    names=['mag', 'snr_z', 'snr_y', 'snr_j', 'snr_h', 
                           'snr_sum_z', 'snr_sum_y', 'snr_sum_j', 'snr_sum_h',
                           'star_z', 'star_y', 'star_j', 'star_h', 
                           'bkg_z', 'bkg_y', 'bkg_j', 'bkg_h'])


    out_file = 'roboAO_sensitivity_t{0:d}_R{1:d}_ap{2:0.3f}'.format(tint, spec_res, sq_aper_diam)

    if seeing_limited:
        out_file += '_seeing'
    
    # Save the tables
    spec_z_tab.write(out_file + '_spec_z_tab.fits', overwrite=True)
    spec_y_tab.write(out_file + '_spec_y_tab.fits', overwrite=True)
    spec_j_tab.write(out_file + '_spec_j_tab.fits', overwrite=True)
    spec_h_tab.write(out_file + '_spec_h_tab.fits', overwrite=True)
    avg_tab.write(out_file + '_avg_tab.fits', overwrite=True)

    return
Beispiel #35
0
def make_tint_curves(mag=20, spec_res=100, sq_aper_diam=0.30, seeing_limited=False):
    tint = np.arange(300, 3600+1, 300)
    snr_y = np.zeros(len(tint), dtype=float)
    snr_j = np.zeros(len(tint), dtype=float)
    snr_h = np.zeros(len(tint), dtype=float)
    
    snr_sum_y = np.zeros(len(tint), dtype=float)
    snr_sum_j = np.zeros(len(tint), dtype=float)
    snr_sum_h = np.zeros(len(tint), dtype=float)
    
    spec_y_tab = None
    spec_j_tab = None
    spec_h_tab = None

    for tt in range(len(tint)):
        print 'Tint: ', tint[tt]
        blah_y = etc_uh_roboAO(mag, 'Y', tint[tt],
                               spec_res=spec_res, sq_aper_diam=sq_aper_diam, 
                               seeing_limited=seeing_limited)
        blah_j = etc_uh_roboAO(mag, 'J', tint[tt],
                               spec_res=spec_res, sq_aper_diam=sq_aper_diam,
                               seeing_limited=seeing_limited)
        blah_h = etc_uh_roboAO(mag, 'H', tint[tt],
                               spec_res=spec_res, sq_aper_diam=sq_aper_diam,
                               seeing_limited=seeing_limited)
        
        col_y_suffix = '_Y_{0:d}'.format(tint[tt])
        col_j_suffix = '_J_{0:d}'.format(tint[tt])
        col_h_suffix = '_H_{0:d}'.format(tint[tt])

        spec_signal_y = Column(name='sig'+col_y_suffix, data=blah_y[4])
        spec_signal_j = Column(name='sig'+col_j_suffix, data=blah_j[4])
        spec_signal_h = Column(name='sig'+col_h_suffix, data=blah_h[4])
        spec_bkg_y = Column(name='bkg'+col_y_suffix, data=blah_y[5])
        spec_bkg_j = Column(name='bkg'+col_j_suffix, data=blah_j[5])
        spec_bkg_h = Column(name='bkg'+col_h_suffix, data=blah_h[5])
        spec_snr_y = Column(name='snr'+col_y_suffix, data=blah_y[6])
        spec_snr_j = Column(name='snr'+col_j_suffix, data=blah_j[6])
        spec_snr_h = Column(name='snr'+col_h_suffix, data=blah_h[6])
        
        if spec_y_tab is None:
            spec_y_tab = Table([blah_y[3]], names=['wave_Y'])
        if spec_j_tab is None:
            spec_j_tab = Table([blah_j[3]], names=['wave_J'])
        if spec_h_tab is None:
            spec_h_tab = Table([blah_h[3]], names=['wave_H'])

        spec_y_tab.add_columns([spec_signal_y, spec_bkg_y, spec_snr_y])
        spec_j_tab.add_columns([spec_signal_j, spec_bkg_j, spec_snr_j])
        spec_h_tab.add_columns([spec_signal_h, spec_bkg_h, spec_snr_h])

        snr_y[tt]  = blah_y[0]
        snr_j[tt]  = blah_j[0]
        snr_h[tt]  = blah_h[0]

        snr_sum_y[tt] = math.sqrt((spec_snr_y**2).sum())
        snr_sum_j[tt] = math.sqrt((spec_snr_j**2).sum())
        snr_sum_h[tt] = math.sqrt((spec_snr_h**2).sum())
        

    avg_tab = Table([tint, 
                     snr_y, snr_sum_y, 
                     snr_j, snr_sum_j,
                     snr_h, snr_sum_h],
                    names=['tint', 
                           'snr_y', 'snr_sum_y', 
                           'snr_j', 'snr_sum_j',
                           'snr_h', 'snr_sum_h'])


    out_file = 'roboAO_tint_m{0:d}_R{1:d}_ap{2:0.3f}'.format(mag, spec_res, sq_aper_diam)

    if seeing_limited:
        out_file += '_seeing'
    
    # Save the tables
    spec_y_tab.write(out_file + '_spec_y_tab.fits', overwrite=True)
    spec_j_tab.write(out_file + '_spec_j_tab.fits', overwrite=True)
    spec_h_tab.write(out_file + '_spec_h_tab.fits', overwrite=True)
    avg_tab.write(out_file + '_avg_tab.fits', overwrite=True)

    return
Beispiel #36
0
def main():
    parser = argparse.ArgumentParser(description='Perform centroiding and profiling of input galaxies.')

    parser.add_argument('data',type=str,help='Image file')
    parser.add_argument('region',type=str,help='Region file')

    # Parse command-line arguments
    args = parser.parse_args()

    # Parse coordinates
    regions = pyregion.open(args.region)
    names = [r.comment for r in regions]
    coords = [(int(r.coord_list[0]),int(r.coord_list[1])) for r in regions]

    # Load the image data and header
    data,header = fits.getdata(args.data,header=True)

    # For each coord, initialize and centroid galaxy
    galaxies = [Galaxy(name,co,data) for co,name in zip(coords,names)]

    # Show each swatch
    pp = PdfPages('galaxy_im.pdf')
    for gal in galaxies:
        pp.savefig(gal.show()[0])
    #plt.show()
    pp.close()

    # Perform aperture photometry
    rows = []
    for gal in galaxies:
        ap = utils.CircularAperture((gal.xc,gal.yc), gal.swatch, 15,19)
        rows.append(ap.run())

    t = Table(rows=rows)
    t.add_column(Column([gal.name for gal in galaxies],name='name'),index=0)

    # Convert to counts/arcsec^2
    pltscale = header['pltscale']    # "/mm
    pltscale = pltscale /1000        # "/um
    pixscaleX = header['xpixelsz']   # um/pix
    pixscaleX = pixscaleX * pltscale # "/pix
    pixscaleY = header['ypixelsz']   # um/pix
    pixscaleY = pixscaleY * pltscale # "/pix
    pixscale = pixscaleX * pixscaleY # arcsec^2/pix^2
    cpa2 = t['counts/pix^2'] / pixscale
    epa2 = t['err_cp2'] / pixscale   # errors must be rescaled the same way as the counts
    
    cpa2 = Column(cpa2,name='counts/arcsec^2')
    epa2 = Column(epa2,name='err_ca2')
    t.add_columns([cpa2,epa2])

    t.pprint()

    '''
name       #pix      counts/pix^2    err_cp2    counts/arcsec^2    err_ca2   
------- ------------- ------------- ------------- --------------- -------------
NGC4875 706.858347058 7721.59941623 117.896411641   7584.60432695 120.025887186
NGC4869 706.858347058 11151.8722709 173.611128029   10954.0179592 176.746937224
GMP4277 706.858347058 9756.15839964 139.752361268   9583.06656737 142.276604641
GMP4350 706.858347058 8855.62011973 130.378485589   8698.50545946  132.73341559
NGC4860 706.858347058 11748.7754577 157.304664726   11540.3310078 160.145942354
NGC4881 706.858347058 11499.1497714 161.710512127   11295.1341311 164.631369312
NGC4921 706.858347058  12822.257865 215.253917975   12594.7679026 219.141890035
    '''

    # prep PDF
    pp = PdfPages('galaxy_prof.pdf')
    pp2= PdfPages('galaxy_contour.pdf')

    contours = []
    for gal in galaxies:
        # perform contour isophotal fitting for profile
        r,med,err,levels = utils.contour_profile(gal.swatch,get_levels=True)
        contours.append(levels)

        fig = plt.figure()

        # plot profile with errorbars
        plt.errorbar(np.array(r)*pixscaleX,med,yerr=err,fmt='o',ms=8)

        # fit sersic function
        try:
            # hard-coded per-galaxy trimming of unreliable profile points
            if gal.name == 'NGC4860':
                xi,yi = r[:-2],med[:-2]
            elif gal.name == 'GMP4350':
                xi,yi = r[1:],med[1:]
            elif gal.name == 'NGC4869':
                xi,yi = r[:-4],med[:-4]
            else:
                xi,yi = r,med 
            xf,yf,p = utils.sersic_fit(xi,yi,rebin=100)
        except:
            plt.legend([gal.name])
            continue

        # plot fit
        plt.plot(xf*pixscaleX,yf,'r-')
        plt.ylabel('Counts')
        plt.xlabel('Radius [arcsec]')
        #plt.xlim([0,12])
        #plt.legend([gal.name,'n = %.2f' % p[0]])

        # plot Re as just half max
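        # note: half of the maximum intensity is only a rough stand-in; the true effective
        # radius encloses half of the total light of the profile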
        Ie = yf.max()/2
        ReIdx,Ie = utils.find_nearest_element(yf,Ie,index=True)
        Re = xf[ReIdx]*pixscaleX
        # calculate Re from exp prof?
        #Re = utils.exp_Re(xf*pixscaleX,yf)
        plt.legend([gal.name,'n = %.2f\nRe = %.2f' % (p[0],Re)])
        plt.plot([np.min(xf)*pixscaleX,Re],[Ie,Ie],'r-.')

        pp.savefig(fig)


    # plot image and contours
    for gal, levels in zip(galaxies, contours):
        fig = plt.figure()
        plt.imshow(gal.swatch,origin='lower',cmap=plt.cm.gist_heat_r)
        plt.contour(gal.swatch,levels=levels,alpha=0.7)
        plt.title(gal.name)


        pp2.savefig(fig)        
        
        

    #plt.show()
    pp.close()
    pp2.close()

    exit()
        

    '''    
Beispiel #37
0
 cleaned = np.vectorize( lambda s: s.strip(':?') )(data)
 empty = np.vectorize( lambda s: s=='' )(data)
 
 table = Table(cleaned, names=fields)
 
 #convert RA/DEC columns (hack: drop the trailing token from the Dec string, then re-parse with SkyCoord)
 ra, dec = table['ra'], table['dec']
 decmap = map(lambda dec: dec.rsplit(' ', 1)[0], dec)
 coords = list(map( ' '.join, zip(ra, decmap) ))
 #coords = Column( name='coords', data=SkyCoord( coords, unit=('h', 'deg') ) )
 coords = SkyCoord( coords, unit=('h', 'deg') )      #TODO: Try use this as column??
 ra_col = Column( coords.ra, 'ra', float )
 dec_col = Column( coords.dec, 'dec', float )
 i = table.colnames.index('ra')
 table.remove_columns( ('ra', 'dec') )
 table.add_columns( [ra_col, dec_col], [i,i] )
 
 #Type filter
 types = [c for c in table.colnames if 'type' in c]
 mtypes = ('AM', 'AS', 'LA', 'IP', 'LI')             #magnetic systems 'IP', 'LI'
 ltype = np.array( [any(t in mtypes for t in tt) for tt in table[types]] )
 
 #Hour angle filter
 #lra = (9 > coords.ra.hour) & (coords.ra.hour < 19)
 
 tq = table[ltype] #&lra
 #tq.sort('ra')
 
 raise SystemExit
 
 #Magnitude filter
Beispiel #38
0
#==============================================================================
# Make Table
#==============================================================================
make_table = False

if make_table:
    data = Table()

    fluxes = np.squeeze(synphot.fluxes).value

    for i, n in enumerate(Names):
        flux = (fluxes[i] * Ms[:, :]).ravel()
        error = 0.01 * flux
        noise = [np.random.normal(0, err) for err in error]
        print np.median(noise / flux) * 100
        data.add_columns([
            Column(flux + noise, 'F%s' % (i + 1)),
            Column(error, 'E%s' % (i + 1))
        ])

    id = Column(name='id', data=np.arange(1, len(data) + 1))
    zspec = Column(name='zspec', data=-1 * np.ones(len(data)))
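    # zspec = -1 is a placeholder meaning "no spectroscopic redshift" (assumed convention
    # of the downstream photo-z catalogue reader)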
    data.add_column(id, 0)
    data.add_column(zspec)

    df = data.to_pandas()

    np.savetxt('Lilly/Lilly_SFH.cat',
               data,
               header=' '.join(data.colnames),
               fmt=['%d'] + ['%.5e' for i in range(20)] + ['%.2f'])
Beispiel #39
0
t['caLHacorr'].description = 'Log of Halpha luminosity, extinction corrected, from <log_L_Ha_cor> column 147 in get_proc_elines_CALIFA.csv'
t['caMstars'].unit = 'dex(solMass)'
t['caMstars'].description = 'Log of stellar mass, from column 73 log_Mass in get_proc_elines_CALIFA.csv'
t['caDistMpc'].unit = 'Mpc'
t['caDistMpc'].description = 'Luminosity distance in Mpc computed from caZgas assuming Ho=70, Om=0.27, Ol=0.73'
if 'califa' in rewrite:
    outcols = ['Name']
    for cname in t.colnames:
        if cname.startswith("ca") or cname.startswith("S"):
            outcols.append(cname)
    tsel = t[outcols]
    newt = join(tsel, dl_pipe3d, keys='Name', join_type='left')
    joint = join(tsel, ca_qc, keys='Name', join_type='left')
    newt.add_columns([
        joint['FLAG_CAL_WL_V500'], joint['FLAG_CAL_REGISTRATION_V500'],
        joint['FLAG_CAL_IMGQUAL_V500']
    ],
                     names=['caFlgWav5', 'caFlgReg5', 'caFlgImg5'])
    newt.add_columns([
        joint['FLAG_CAL_WL_V1200'], joint['FLAG_CAL_REGISTRATION_V1200'],
        joint['FLAG_CAL_IMGQUAL_V1200']
    ],
                     names=['caFlgWav12', 'caFlgReg12', 'caFlgImg12'])
    newt['caDistP3d'].unit = 'Mpc'
    newt['caDistP3d'].description = 'Luminosity distance in Mpc from get_proc_elines_CALIFA.csv'
    newt['caFlgWav5'].description = 'Flag (-1/0/1/2=NA/good/minor/bad) for wavelength calibration V500'
    newt['caFlgWav12'].description = 'Flag (-1/0/1/2=NA/good/minor/bad) for wavelength calibration V1200'
    newt[
Beispiel #40
0
def make_table(data,details,units,prefix,ra_lims,dec_lims):
	##Find all of the data in the columns as specified by the
	##user inputs
	shape = data.shape
	entries = shape[0]
	names = data[details[0]]
	RA = get_units(data,details[1],units[0],'angle',entries)
	RA_error = get_units(data,details[2],units[1],'angle',entries)
	Dec = get_units(data,details[3],units[2],'angle',entries)
	Dec_error = get_units(data,details[4],units[3],'angle',entries)
	freq = details[5]
	flux = get_units_blanks(data,details[6],units[4],'flux',entries)
	flux_error = get_units_blanks(data,details[7],units[5],'flux',entries)
	PA = get_units_blanks(data,details[8],units[6],'angle',entries)
	major = get_units_blanks(data,details[9],units[7],'angle',entries)
	minor = get_units_blanks(data,details[10],units[8],'angle',entries)

	if details[11]=='-':
		flags = np.empty(entries); flags.fill(-100000.0)
	else:
		flags = data[details[11]]
	if details[12]=='-':
		ID = np.empty(entries); ID.fill(-100000.0)
	else:
		ID = data[details[12]]
	freqs = []
	fluxs = []
	ferrs = []
	##This handles the case of more than one frequency in a single catalogue
	if len(details)>13:
		length = len(details) - 13
		num_of_freqs = length / 3
		for i in xrange(num_of_freqs):
			freqs.append(details[13+(3*i)])
			fluxs.append(get_units_blanks(data,details[13+1+(3*i)],units[9+(2*i)],'flux',entries))
			ferrs.append(get_units_blanks(data,details[13+2+(3*i)],units[10+(2*i)],'flux',entries))

	def get_lune(ra1,ra2,dec1,dec2):
		'''Calculates the steradian coverage of a lune defined by two RA,Dec
		coords'''
		return abs((ra2*dr-ra1*dr)*(np.sin(dec2*dr)-np.sin(dec1*dr)))
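	# (note) 'dr' is assumed to be the degrees-to-radians factor (np.pi/180) defined at
	# module level, so the lune area returned above is in steradians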

	##Count the number of sources within the requested user lune to calculate the scaled
	##source density of the catalogues
	##If the ra lims do not cross over the RA = 0 line
	if ra_lims[0]<ra_lims[1]:
		sources_in_bounds =  [i for i in xrange(len(RA)) if (RA[i]>=ra_lims[0] and RA[i]<=ra_lims[1]) and (Dec[i]>=dec_lims[0] and Dec[i]<=dec_lims[1])]
		area = get_lune(ra_lims[0],ra_lims[1],dec_lims[0],dec_lims[1])
	##If they do, search the coords slightly differently and do some rearranging to get the area
	else:
		sources_in_bounds =  [i for i in xrange(len(RA)) if (RA[i]>=ra_lims[0] or RA[i]<=ra_lims[1]) and (Dec[i]>=dec_lims[0] and Dec[i]<=dec_lims[1])]
		extra = 360.0 - ra_lims[0]
		area = get_lune(0,ra_lims[1]+extra,dec_lims[0],dec_lims[1])
	scaled_source_density = (4*np.pi*len(sources_in_bounds))/area

	##Create a new table, and populate with the data in correct units
	t=Table(masked=True,meta={'src_dens':scaled_source_density})
	t_names = Column(name='%s_name' %prefix,data=names,description='Name from catalogue',dtype=str)
	t_ras = Column(name='%s_RAJ2000' %prefix,data=RA,description='Right Ascension of source (J2000)',unit='deg',dtype=float)
	t_rerrs = Column(name='%s_e_RAJ2000' %prefix,data=RA_error,description='Error on Right Ascension',unit='deg',dtype=float)
	t_decs = Column(name='%s_DEJ2000' %prefix,data=Dec,description='Declination of source (J2000)',unit='deg',dtype=float)
	t_derrs = Column(name='%s_e_DEJ2000' %prefix,data=Dec_error,description='Error on Declination',unit='deg',dtype=float)
	t_fluxes = Column(name='%s_S%d' %(prefix,float(freq)),data=flux,description='Source flux at %.1fMHz' %float(freq),unit='Jy',dtype=float)
	t_ferrs = Column(name='%s_e_S%d' %(prefix,float(freq)),data=flux_error,description='Error on flux at %.1fMHz' %float(freq),unit='Jy',dtype=float)
	t_majors = Column(name='%s_MajAxis' %prefix,data=major,description='Fitted major axis',unit='deg',dtype=float)
	t_minors = Column(name='%s_MinAxis' %prefix,data=minor,description='Fitted minor axis',unit='deg',dtype=float)
	t_PAs = Column(name='%s_PA' %prefix,data=PA,description='Fitted Position Angle',unit='deg',dtype=float)
	mask = []
	for i in flags:
		if type(i)==np.ma.core.MaskedConstant: mask.append(True)
		else: mask.append(False)
	t_flags = MaskedColumn(name='%s_flag' %prefix,data=flags,description='Any meta flag for inclusion',mask=mask,fill_value='--',dtype=str)
	mask = []
	for i in ID:
		if type(i)==np.ma.core.MaskedConstant: mask.append(True)
		else: mask.append(False)
	t_fields = MaskedColumn(name='%s_FieldID' %prefix,data=ID,description='If available, image field ID',mask=mask,fill_value='--',dtype=str)

	t.add_columns([t_names,t_ras,t_rerrs,t_decs,t_derrs,t_fluxes,t_ferrs,t_majors,t_minors,t_PAs,t_flags,t_fields])
	#Again, handles multiple frequencies in one catalogue
	if len(freqs)>0:
		for i in xrange(len(freqs)):
			t_fextra = Column(name='%s_Flux_%.1f' %(prefix,float(freqs[i])),data=fluxs[i],description='Source flux at %.1fMHz' %float(freqs[i]),unit='Jy',dtype=float)
			t_ferrextra = Column(name='%s_Flux_%.1f_err' %(prefix,float(freqs[i])),data=ferrs[i],description='Error on flux at %.1fMHz' %float(freqs[i]),unit='Jy',dtype=float)
			t.add_columns([t_fextra,t_ferrextra])

	##Add the source density to the table
	#t.add_keyword('%s_nu' %prefix,str(scaled_source_density))
	t.write('simple_%s.fits' %prefix,overwrite=True,format='fits')

	return scaled_source_density
Beispiel #41
0
def main():
	epilog='''The BASS DR1 pipeline is designed for images taken by the Bok
	Telescope.
	It performs photometry for both SDSS-detected objects and Bok-only sources.
	The photometric methods include automatic aperture, circular aperture, PSF
	and model photometry. The model photometry can either use SDSS shape
	parameters for forced photometry or refit with self-guessed parameters.
	The two models are the de Vaucouleurs (r^1/4) and exponential laws.
	Gain and read-out noise are read from the FITS header.
	For a single image only the flag image needs to be given. If weight, flag,
	psfex or sdss inputs are empty, the program will find or compute them automatically.'''

	parser = argparse.ArgumentParser(description='BASS DR1 Photometric Pipeline.',
			epilog=epilog,fromfile_prefix_chars='@')
	parser.add_argument('fits', metavar='FITS', type=str, nargs='*',
			help='Bok calibrated images to do photometry. @filelist to read from file')
	parser.add_argument('-f','--flag',type=str,nargs='*',default=[],
			help='flag image (bad pixels). @filelist to read from file')
	parser.add_argument('--psfex',type=str,nargs='*',metavar='psfex',default=[],
			help='PSF file derived by PSFEx')
	parser.add_argument('--force-filter',type=str,default=None, 
			help='Forced photometry using SDSS shape parameters measured in this filter; default None (use the same filter as the image)')
	parser.add_argument('--save-sky', action='store_true',
			help='save the 2d sky background and rms map')
	parser.add_argument('--save-segment', action='store_true',
			help='save the 2d segmentation map')
	parser.add_argument('--save-model', action='store_true',
			help='save the fitted model and residual image')
	parser.add_argument('--plot', action='store_true',
			help='plot all kinds of corrections in the catalog')
	parser.add_argument('--nthread',type=int,default=0,
			help='number of threads to parallel processing, default 0')
	parser.add_argument('--version', action='version',version='%(prog)s 1.0')
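	# Example invocation (file names are hypothetical, shown for illustration only):
	#   python bass_dr1_phot.py @imagelist.txt --save-sky --save-model --nthread 4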
	# parse arguments
	args = parser.parse_args()
	
	###CCD parameters
	saturate=30000.0
	gain=1.4
	rdnoise=8.0
	## detecting and analysis
	detect_nsigma=1.2
	aly_nsigma=1.5
	conv_sigma=1.2 # not seeing FWHM/2.35: if the seeing is too large, many faint objects cannot be found, so we use 1.2 for all Bok images
	sky_grid=216
	remove_nthresh=0
	sig_flag=True
	sig_level=3.0
	med_window=21
	remove_big_width=None # remove big objects
	nradius=10 # n*effective radius to create models and residual map
	# photometry
	psfmag_limit=24.0 # SDSS r-band mag limit to get sources
	apertures=[3,4,5,6,8,10,13,16,20,25,30,40]
	sky_annuli=[40,20]
	edge_nfwhm=3.0 # n*fwhm of seeing to be regarded as edge pixels
	min_dist=3.3 # minimum matching distance (in pixels) between Bok/SCUSS and SDSS sources
	ab_limit=[0.1,10.0]
	re_limit=[1e-10,5.0]
	theta_limit=[None,None]
	nmax_fit=1
	nmax_force_fit=1

	# get list of fits, psfs, flags or weights
	fitslist=args.fits
	flaglist=args.flag
	psflist=args.psfex
	
	nfits=len(fitslist)
	nflag=len(flaglist)
	npsf=len(psflist)
	
	# print help if no parameters provided
	if len(args.fits) == 0:
		parser.print_help()
	# verify whether file numbers are equal
	if npsf !=0 and npsf != nfits:
		print('The number of PSF files should be the same as the number of FITS images')
		sys.exit(-1)
	if nflag !=0 and nflag != nfits:
		print('The number of flag images should be the same as the number of single-epoch images')
		sys.exit(-1)
	
	for i,ifits in enumerate(fitslist):
		start=time.time()
		print('phot '+ifits+' ...')

		# check file exist
		if not os.path.isfile(ifits):
			continue

		# get FITS image
		fitsname,_=os.path.splitext(ifits)
		fitspath,name=os.path.split(ifits)
		data=fits.getdata(ifits)
		head=fits.getheader(ifits)
		head['ctype1']='RA---TPV'
		head['ctype2']='DEC--TPV'
		filt=head['filter']
		if filt == 'bokr': filt='r'

		# calculate the julian day
		datestr=head['date-obs']
		timestr=head['time-obs']
		tt=Time(datestr+' '+timestr,format='iso',scale='utc')
		mjd=tt.mjd

		# get seeing value
		seeing=None
		if 'seeing' in head:
		    seeing=head['seeing']/0.455 # seeing in pixels
	
		#get flag, mask
		mask_sat=data > saturate
		if nflag ==0:
			ccd=str(head['CCD_NO'])
			date=head['date-obs']
			flagfile=get_flagfile(date,ccd)
			flag=fits.getdata(flagfile)
		else:
			flagfile=flaglist[i]
			flag=fits.getdata(flagfile)
		flag=(flag != 0).astype('int')
		mask=(flag != 0) | (data > 70000.0) | (data< 1.0)
		print('flag file: '+flagfile)
			
		# flag the saturated pixels
		flag[mask_sat]=np.bitwise_or(flag[mask_sat],2)

		psffile=fitsname+'.psf'
		# get psf file
		if npsf == 0:
			if not os.path.isfile(psffile):
				get_psfex(ifits)
		else:
			if os.path.isfile(psflist[i]):
				psffile=psflist[i]
			else:
				get_psfex(ifits)
		
		# download sdss catalog
		sdsscat=fitsname+'-sdss.fits'
		ra,dec=fits_center(head)
		print(ra,dec)
		print('fits center: {}, {}'.format(ra,dec))
		if not os.path.isfile(sdsscat):
			d=DownSDSSCat()
			d.get_allmag_cat(ra=ra,dec=dec,width=0.6,psfmag_limit=psfmag_limit,filename=sdsscat,fmt='fits')
		if os.path.isfile(sdsscat):
			print('sdss catalog: '+sdsscat)
		else:
			print('sdss catalog is not found!')

		# processing sdss catalogs: transform the astrophot input parameters and get sdss catalogs within the image

		sdss_params=None
		if os.path.isfile(sdsscat) and os.path.getsize(sdsscat)!=0:
			sdss=fits.getdata(sdsscat,1)
			sdss_params,sdss=params_from_sdss(sdss,head,force_filter=args.force_filter,faint_limit=23.0)

		# get psfex PSF, initialize photometry
		psf=pyphot.psfex.PSFEx()
		try:
			psf.from_fits(psffile)
		except Exception:
			continue
		phot=pyphot.phot.PyPhot(data,uncertainty=None,mask=mask,flags=flag,weight=None,saturate=saturate,gain=gain,rdnoise=rdnoise,seeing=seeing,nthread=args.nthread)

		if os.path.isfile(fitsname+'-sky.fits') and os.path.isfile(fitsname+'-sky_rms.fits'):
			print('get sky and rms map from saved fits')
			phot.sky=fits.getdata(fitsname+'-sky.fits')
			phot.sky_rms=fits.getdata(fitsname+'-sky_rms.fits')
		else:
			phot.get_sky_map(nsample=30000,grid_size=sky_grid)
		# get flux error map	
		if seeing is None:
			phot.get_seeing()
			seeing=phot.seeing

		# define the edge pixels and flags
		edge_pixels=np.ceil(edge_nfwhm*seeing).astype('int')
		if edge_pixels > 0:
			flag[:edge_pixels,:]=np.bitwise_or(flag[:edge_pixels,:],4)
			flag[-edge_pixels:,:]=np.bitwise_or(flag[-edge_pixels:,:],4)
			flag[:,:edge_pixels]=np.bitwise_or(flag[:,:edge_pixels],4)
			flag[:,-edge_pixels:]=np.bitwise_or(flag[:,-edge_pixels:],4)
		phot.flag=flag

		phot.update_data_uncertainty(poisson=True)
		if args.save_sky:
			print('save sky and rms maps')
			fits.writeto(fitsname+'-sky.fits',phot.sky.astype('float32'),header=head,clobber=True,output_verify='fix+warn')
			fits.writeto(fitsname+'-sky_rms.fits',phot.sky_rms.astype('float32'),header=head,clobber=True,output_verify='fix+warn')
		
		# detecting source in bok image
		tmp_prop=phot.detect_objects(detect_nsigma=detect_nsigma,conv_sigma=conv_sigma,analysis_nsigma=aly_nsigma,remove_nthresh=remove_nthresh,sig_flag=sig_flag,sig_level=sig_level,remove_after_segment=False,remove_big_width=remove_big_width)
		coord_scuss=np.array((tmp_prop['x_window'],tmp_prop['y_window'])).transpose()
		nscuss=len(coord_scuss)
		# get SDSS coordinates and combine them into Bok detected sources
		coord=coord_scuss
		if sdss_params is not None:
			coord_sdss=np.array((sdss_params['x'],sdss_params['y'])).transpose()
			index_scuss,index_sdss=closest_match(coord_scuss,coord_sdss,min_dist=min_dist)
			index_scuss=np.array(index_scuss)
			indsdss_match=np.array([idx[0] for idx in index_sdss])
			indsdss_nomatch=np.setdiff1d(np.arange(len(coord_sdss)),indsdss_match)
			nuniq_sdss=len(indsdss_nomatch)
			coord=np.vstack((coord_scuss,coord_sdss[indsdss_nomatch]))
			is_sdss=Table.Column(np.zeros(len(coord)).astype('bool'),'is_sdss')

			
			if len(index_scuss) !=0 :
				is_sdss[index_scuss]=True
			if nuniq_sdss > 0:
				is_sdss[nscuss:]=True
		else:
			is_sdss=Table.Column(np.zeros(len(coord)).astype('bool'),'is_sdss')
		is_bok=Table.Column(np.zeros(len(coord)).astype('bool'),'is_bok')
		is_bok[:nscuss]=True

		# calculate MJD for single-epoch images
		mjd_col=Table.Column(np.ones(len(coord))*mjd,'bok_mjd')
		
		# get detection properties
		if sdss_params is not None:
			det_sc=phot.detect_objects(detect_nsigma=detect_nsigma,conv_sigma=conv_sigma,analysis_nsigma=aly_nsigma,remove_nthresh=remove_nthresh,sig_flag=sig_flag,sig_level=sig_level,remove_after_segment=False,coordinates=coord,remove_big_width=remove_big_width)
		else:
			det_sc=tmp_prop
		# save segmentations
		if args.save_segment:
			fits.writeto(fitsname+'-segment.fits',phot.seglabels,header=head,clobber=True,output_verify='fix+warn')
		param_det=phot.parameter_guess(det_sc,xname='x_window',yname='y_window')

		# clean resulting data
		det_sc=Table(det_sc)
		rename_table(det_sc,prefix='bok_',postfix='_'+filt)
		del_cols=['x_min','x_max','y_min','y_max','cxx','cxx_error','cyy','cyy_error','cxy','cxy_error','flux_kron','flux_kron_error','kron_radius']
		for col in del_cols:
			det_sc.remove_column('bok_'+col+'_'+filt)
		fluxadu=det_sc
		index_det=det_sc['bok_id_'+filt]
		is_sdss=is_sdss[index_det]
		is_bok=is_bok[index_det]
		mjd_col=mjd_col[index_det]
		if sdss_params is not None:
			tmpindex=np.arange(len(coord))
			if len(index_scuss) !=0 :
				tmpindex[index_scuss]=indsdss_match
			tmpindex[nscuss:]=indsdss_nomatch
			tmpindex=tmpindex[index_det]
			tmpindex=tmpindex[is_sdss]
			sdss=sdss[tmpindex]
			sdss_params=sdss_params[tmpindex]

		fluxadu.add_column(mjd_col)
		fluxadu.add_column(is_sdss)
		fluxadu.add_column(is_bok)

		# parameter guess: if SDSS exist use SDSS, else use guess to force photometry
		bok_id='bok_id_'+filt
		if sdss_params is not None:
			param_det['flux_psf'][is_sdss]=sdss_params['flux_psf']
			param_det['re_dev'][is_sdss]=sdss_params['re_dev']
			param_det['re_exp'][is_sdss]=sdss_params['re_exp']
			param_det['ab_dev'][is_sdss]=sdss_params['ab_dev']
			param_det['ab_exp'][is_sdss]=sdss_params['ab_exp']
			param_det['theta_dev'][is_sdss]=sdss_params['theta_dev']
			param_det['theta_exp'][is_sdss]=sdss_params['theta_exp']
			param_det['flux_dev'][is_sdss]=sdss_params['flux_dev']
			param_det['flux_exp'][is_sdss]=sdss_params['flux_exp']
			param_det['frac_dev'][is_sdss]=sdss_params['frac_dev']

			mod_sd1=phot.model_photometry(psf,param_guess=param_det[is_sdss],pos_limit=True,ab_limit=True,re_limit=True,theta_limit=True,nmax_fit=nmax_force_fit,models=['psf','dev','exp'])
			mod_sd1=Table(mod_sd1)
			groupmax=np.max(mod_sd1['group'])
			rename_table(mod_sd1,prefix='bok_',postfix='_'+filt)
			del_cols=['x_in','y_in']#,'x_psf','x_psf_error','y_psf','y_psf_error','x_dev','x_dev_error','y_dev','y_dev_error','re_dev','re_dev_error','ab_dev','ab_dev_error','theta_dev','theta_dev_error','x_exp','x_exp_error','y_exp','y_exp_error','re_exp','re_exp_error','ab_exp','ab_exp_error','theta_exp','theta_exp_error']
			for col in del_cols:
				mod_sd1.remove_column('bok_'+col+'_'+filt)
			mod_sd1[bok_id]=fluxadu[bok_id][is_sdss]
			fluxadu=table.join(fluxadu,mod_sd1,join_type='left')
			fluxadu=fluxadu.filled(fill_value=0)
			#fluxadu=fluxadu.filled()

		tmpmask=np.logical_and(is_bok,np.logical_not(is_sdss))
		if tmpmask.any():
			mod_sd2=phot.model_photometry(psf,param_guess=param_det[tmpmask],pos_limit=True,ab_limit=True,theta_limit=True,re_limit=True,nmax_fit=nmax_force_fit,models=['psf','dev','exp'])
			if sdss_params is not None:
				mod_sd2['group']+=groupmax+1
			mod_sd2=Table(mod_sd2)
			rename_table(mod_sd2,prefix='bok_',postfix='_'+filt)
			del_cols=['x_in','y_in']#,'x_psf','x_psf_error','y_psf','y_psf_error','x_dev','x_dev_error','y_dev','y_dev_error','re_dev','re_dev_error','ab_dev','ab_dev_error','theta_dev','theta_dev_error','x_exp','x_exp_error','y_exp','y_exp_error','re_exp','re_exp_error','ab_exp','ab_exp_error','theta_exp','theta_exp_error']
			for col in del_cols:
				mod_sd2.remove_column('bok_'+col+'_'+filt)
			if sdss_params is not None:
				#if sdss exist merge
				allcols=mod_sd2.colnames
				for col in allcols:
					fluxadu[col][tmpmask]=mod_sd2[col]
			else:
				# if no sdss add new columns
				mod_sd2[bok_id]=fluxadu[bok_id][tmpmask]
				fluxadu=table.join(fluxadu,mod_sd2,join_type='left')
				fluxadu=fluxadu.filled(fill_value=0)

		# merge sdss information
		if sdss_params is not None:
			sdss=Table(sdss)
			rename_table(sdss,prefix='sdss_')
			sdss.add_columns([table.Column(sdss_params['x'],name='sdss_bokx_'+filt), 
				table.Column(sdss_params['y'],name='sdss_boky_'+filt)],indexes=[0,0])
		
			sdss[bok_id]=-1
			sdss[bok_id]=fluxadu[bok_id][is_sdss]
			fluxadu=table.join(fluxadu,sdss,join_type='left')
			fluxadu=fluxadu.filled(fill_value=0)
		# get coordinates for aperture photometry
		aper_coord=np.array([fluxadu['bok_x_window_'+filt],fluxadu['bok_y_window_'+filt]]).transpose()

		print('Aperture Photometry ....')
		aper_prop=phot.aperture_photometry(aper_coord,apertures=apertures,sky_annulus=sky_annuli,aper_type='circle',sky_type='global')
		aper_prop=Table(aper_prop)
		rename_table(aper_prop,prefix='bok_',postfix='_'+filt)
		del_cols=['id','x_in','y_in']
		for col in del_cols:
			aper_prop.remove_column('bok_'+col+'_'+filt)
		fluxadu.add_columns(aper_prop.columns.values())

		#re-order the objects
		fluxadu[bok_id]=np.arange(len(fluxadu))

		if args.save_model:
			tmpdata=fluxadu.copy()
			cols=['id','x_psf','x_psf_error','y_psf','y_psf_error','x_dev','x_dev_error','y_dev','y_dev_error','re_dev','re_dev_error','ab_dev','ab_dev_error','theta_dev','theta_dev_error','x_exp','x_exp_error','y_exp','y_exp_error','re_exp','re_exp_error','ab_exp','ab_exp_error','theta_exp','theta_exp_error','chi2_psf','chi2_dev','chi2_exp','psf_fwhm','flux_psf','flux_dev','flux_exp']
			for col in cols:
				tmpdata.rename_column('bok_'+col+'_'+filt,col)
			model,residual=phot.create_model_residual(tmpdata,psf,nradius=nradius)
			fits.writeto(fitsname+'-model.fits',model.astype('float32'),header=head,clobber=True,output_verify='fix+warn')
			fits.writeto(fitsname+'-residual.fits',residual.astype('float32'),header=head,clobber=True,output_verify='fix+warn')
		
		#del_cols=['x_psf','x_psf_error','y_psf','y_psf_error','x_dev','x_dev_error','y_dev','y_dev_error','re_dev','re_dev_error','ab_dev','ab_dev_error','theta_dev','theta_dev_error','x_exp','x_exp_error','y_exp','y_exp_error','re_exp','re_exp_error','ab_exp','ab_exp_error','theta_exp','theta_exp_error']
		#for col in del_cols:
		#	fluxadu.remove_column('bok_'+col+'_'+filt)
		fluxfile=fitsname+'-fluxadu.fits'
		if os.path.isfile(fluxfile):
			os.remove(fluxfile)
		fluxadu.write(fluxfile,format='fits')
		#fits.writeto(fluxfile,fluxadu.as_array(),clobber=True,output_verify='fix+warn')

		ind_head=head.index('RA-OBS')
		f=fits.open(fluxfile,mode='update')
		f[1].header["apersize"]=('3,4,5,6,8,10,13,16,20,25,30,40','aperture radii in pixels')
		f[1].header.extend(head.cards[ind_head:],bottom=True)
		f.flush(output_verify='fix+warn')
		f.close()

		# calibrate flux and aperture corrections
		fluxcalib=calibrate_fluxadu(fluxadu,head)
		if fluxcalib is False:
			continue
		plotname=None
		if args.plot:
			plotname=fitsname
		fluxcor=magnitude_correction(fluxcalib,head,elp_lim=1.0,sigma=2.5,plot_name=plotname)
		corfile=fitsname+'-fluxcor.fits'
		if os.path.isfile(corfile):
			os.remove(corfile)
		fluxcor.write(corfile,format='fits')
		#fits.writeto(corfile,fluxcor.as_array(),clobber=True,output_verify='fix+warn')
		f=fits.open(corfile,mode='update')
		f[1].header["apersize"]=('3,4,5,6,8,10,13,16,20,25,30,40','aperture radii in pixels')
		f[1].header.extend(head.cards[ind_head:],bottom=True)
		f.flush(output_verify='fix+warn')
		f.close()

		print('Total processing time for '+fitsname,time.time()-start)
journal_counts = {}
for journal in os.listdir(main_dir):
    journal_counts[journal] = sum([len(files) for r, d, files in os.walk(main_dir + "/" + journal)])
print(journal_counts)
final_table = Table()
final_columns = []
stat_keys = []
with open('final_analysis_new_java.txt', 'r') as final_analysis:
    for line in final_analysis.readlines():
        for jour in journal_counts.keys():
            if jour in line:
                dict_journ = ast.literal_eval(
                    line[len(jour) + 3:][:-1].replace(')', ''))
                for key, value in dict_journ.items():
                    dict_journ[key] = value / journal_counts[jour]
                # print (jour, dict_journ)
                stat_keys = dict_journ.keys()
                distri_column = Column(name=jour,
                                       data=list(dict_journ.values()))
                final_columns.append(distri_column)
print("--- %s seconds ---" % (time.time() - start_time))
stat_column = Column(name='stat. Methods', data=stat_keys)
final_table.add_column(stat_column)
final_table.add_columns(final_columns)
# ascii.write(final_table, 'final_distribution_new_second.dat', format='fixed_width')
ascii.write(final_table,
            'final_distribution_new_java.dat',
            format='fixed_width')
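
# Hedged check (not part of the original script): read the fixed-width table
# back to confirm the per-journal distributions were written as expected.
# Assumes the file written by ascii.write above exists in the working directory.
check = ascii.read('final_distribution_new_java.dat', format='fixed_width')
print(check.colnames)   # 'stat. Methods' plus one column per journal
print(len(check))       # one row per statistical-method key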
# ======================================= END: Generate percentage distribution for all journals ======================================
def load_plot_etc_target_type(metadata_path, spectra_path, test_inds, target_type, no_plot=False,
                              save_out=False, restrict_delta=False, use_spca=False, use_pca=False):
    obs_metadata = trim_observation_metadata(load_observation_metadata(metadata_path))
    if use_filter_split:
        c_sources, c_mixing, c_exposures, c_wavelengths, c_filter_split_arr = load_spectra_data(
            spectra_path, target_type=target_type, filter_str='nonem', use_spca=use_spca, use_pca=use_pca)
        c_sources_e, c_mixing_e, c_exposures_e, c_wavelengths_e, c_filter_split_arr_e = load_spectra_data(
            spectra_path, target_type=target_type, filter_str='em', use_spca=use_spca, use_pca=use_pca)
    else:
        c_sources, c_mixing, c_exposures, c_wavelengths, c_filter_split_arr = load_spectra_data(
            spectra_path, target_type=target_type, filter_str='both', use_spca=use_spca, use_pca=use_pca)

    reduced_obs_metadata = obs_metadata[np.in1d(obs_metadata['EXP_ID'], c_exposures)]
    reduced_obs_metadata.sort('EXP_ID')
    sorted_inds = np.argsort(c_exposures)
    if use_filter_split:
        sorted_e_inds = np.argsort(c_exposures_e)

    if not linear_only:
        if reg_type == 'etr':
            rfr = ensemble.ExtraTreesRegressor(n_estimators=n_estimators, min_samples_split=min_samples_split,
                        random_state=rfr_random_state, n_jobs=-1, verbose=False, bootstrap=bootstrap)
            if use_filter_split:
                rfr_e = ensemble.ExtraTreesRegressor(n_estimators=n_estimators, min_samples_split=min_samples_split,
                        random_state=rfr_random_state, n_jobs=-1, verbose=False, bootstrap=bootstrap)
        else:
            rfr = ensemble.RandomForestRegressor(n_estimators=n_estimators, min_samples_split=min_samples_split,
                        random_state=rfr_random_state, n_jobs=-1, verbose=False, bootstrap=bootstrap)
            if use_filter_split:
                rfr_e = ensemble.RandomForestRegressor(n_estimators=n_estimators, min_samples_split=min_samples_split,
                        random_state=rfr_random_state, n_jobs=-1, verbose=False, bootstrap=bootstrap)
        if include_knn:
            knn = neighbors.KNeighborsRegressor(weights='distance', n_neighbors=10, p=64)
            if use_filter_split:
                knn_e = neighbors.KNeighborsRegressor(weights='distance', n_neighbors=10, p=64)

    if include_linear:
        linear = Linear(fit_intercept=True, copy_X=True, n_jobs=-1)
        poly_2_linear = Pipeline([('poly', PolynomialFeatures(degree=2)),
                            ('linear', Linear(fit_intercept=True, copy_X=True, n_jobs=-1))])
        poly_3_linear = Pipeline([('poly', PolynomialFeatures(degree=3)),
                        ('linear', Linear(fit_intercept=True, copy_X=True, n_jobs=-1))])
        poly_4_linear = Pipeline([('poly', PolynomialFeatures(degree=4)),
                        ('linear', Linear(fit_intercept=True, copy_X=True, n_jobs=-1))])
        if use_filter_split:
            linear_e = Linear(fit_intercept=True, copy_X=True, n_jobs=-1)
            poly_2_linear_e = Pipeline([('poly', PolynomialFeatures(degree=2)),
                            ('linear', Linear(fit_intercept=True, copy_X=True, n_jobs=-1))])
            poly_3_linear_e = Pipeline([('poly', PolynomialFeatures(degree=3)),
                        ('linear', Linear(fit_intercept=True, copy_X=True, n_jobs=-1))])
            poly_4_linear_e = Pipeline([('poly', PolynomialFeatures(degree=4)),
                        ('linear', Linear(fit_intercept=True, copy_X=True, n_jobs=-1))])

    reduced_obs_metadata.remove_column('EXP_ID')
    md_len = len(reduced_obs_metadata)
    var_count = len(reduced_obs_metadata.columns)
    X_arr = np.array(reduced_obs_metadata).view('f8').reshape((md_len,-1))

    ica = None
    if not use_spca and not use_pca:
        if use_filter_split:
            ica = ICAize.unpickle_FastICA(path=spectra_path, target_type=target_type, filter_str='nonem')
            ica_e = ICAize.unpickle_FastICA(path=spectra_path, target_type=target_type, filter_str='em')
        else:
            ica = ICAize.unpickle_FastICA(path=spectra_path, target_type=target_type, filter_str='both')
    elif use_spca:
        ica = ICAize.unpickle_SPCA(path=spectra_path, target_type=target_type)
    else:
        if use_filter_split:
            ica = ICAize.unpickle_PCA(path=spectra_path, target_type=target_type, filter_str='nonem')
            ica_e = ICAize.unpickle_PCA(path=spectra_path, target_type=target_type, filter_str='em')
        else:
            ica = ICAize.unpickle_PCA(path=spectra_path, target_type=target_type, filter_str='both')

    spectra_dir_list = os.listdir(spectra_path)

    ################################################################
    results = None
    for test_ind in test_inds:
        test_X = X_arr[test_ind]
        train_X = np.vstack( [X_arr[:test_ind], X_arr[test_ind+1:]] )
        test_y =  (c_sources[sorted_inds])[test_ind]
        train_y = np.vstack( [(c_sources[sorted_inds])[:test_ind], (c_sources[sorted_inds])[test_ind+1:]] )
        if use_filter_split:
            test_y_e =  (c_sources_e[sorted_e_inds])[test_ind]
            train_y_e = np.vstack( [(c_sources_e[sorted_e_inds])[:test_ind], (c_sources_e[sorted_e_inds])[test_ind+1:]] )

        if scale:
            scaler = StandardScaler(with_std=scale_std)
            train_X = scaler.fit_transform(train_X)
            test_X = scaler.transform(test_X)

        title_str = "exp{}, {}".format(c_exposures[sorted_inds[test_ind]], target_type)

        if not linear_only:
            rfr.fit(X=train_X, y=train_y)
            if use_filter_split:
                rfr_e.fit(X=train_X, y=train_y_e)
            if include_knn:
                knn.fit(X=train_X, y=train_y)
                if use_filter_split:
                    knn_e.fit(X=train_X, y=train_y_e)

        if include_linear:
            linear.fit(train_X, train_y)
            poly_2_linear.fit(train_X, train_y)
            if order_3:
                poly_3_linear.fit(train_X, train_y)
            if order_4:
                poly_4_linear.fit(train_X, train_y)
        if use_filter_split and include_linear:
            linear_e.fit(train_X, train_y_e)
            poly_2_linear_e.fit(train_X, train_y_e)
            if order_3:
                poly_3_linear_e.fit(train_X, train_y_e)
            if order_4:
                poly_4_linear_e.fit(train_X, train_y_e)

        print(test_ind, c_exposures[sorted_inds[test_ind]], end=' ')

        data = None
        actual = None
        mask = None
        delta_mask = None
        ivar = None

        for file in spectra_dir_list:
            if fnmatch.fnmatch(file, "stacked_sky_*exp{}.csv".format(c_exposures[sorted_inds[test_ind]])):
                data = Table.read(os.path.join(spectra_path, file), format="ascii.csv")
                ivar = data['ivar']
                mask = (data['ivar'] == 0)
                delta_mask = mask.copy()
                if restrict_delta:
                    if restrict_color == 'blue':
                        delta_mask[2700:] = True
                    else:
                        delta_mask[:2700] = True

                actual = data['flux']
                break
        if actual is None:
            continue

        if not linear_only:
            rfr_prediction = rfr.predict(test_X)
            if not use_spca and not use_pca:
                rfr_predicted = ica.inverse_transform(rfr_prediction, copy=True)
            else:
                rfr_predicted = np.zeros( (1, ica.components_.shape[1]) )
                rfr_predicted[0,:] = np.sum(rfr_prediction.T * ica.components_, 0)

            if use_filter_split:
                rfr_e_prediction = rfr_e.predict(test_X)
                if not use_spca and not use_pca:
                    rfr_e_predicted = ica_e.inverse_transform(rfr_e_prediction, copy=True)
                else:
                    rfr_e_predicted = np.zeros( (1, ica_e.components_.shape[1]) )
                    rfr_e_predicted[0,:] = np.sum(rfr_e_prediction.T * ica_e.components_, 0)
                rfr_predicted = rfr_predicted + rfr_e_predicted

            rfr_delta = rfr_predicted[0] - actual
            if not no_plot:
                plt.plot(c_wavelengths[~mask], rfr_predicted[0][~mask])
                plt.plot(c_wavelengths[~mask], actual[~mask])
                plt.plot(c_wavelengths[~mask], rfr_delta[~mask])
            if not no_plot:
                plt.plot(c_wavelengths, [0]*len(c_wavelengths))
            err_term = np.sum(np.power(rfr_delta[~delta_mask], 2))/len(c_wavelengths[~delta_mask])
            err_sum = np.sum(rfr_delta[~delta_mask])/len(rfr_delta[~delta_mask])
            red_chi = np.sum(np.power(rfr_delta[~delta_mask], 2)*ivar[~delta_mask])/(len(c_wavelengths[~delta_mask])-var_count-1)
            if not no_plot:
                plt.legend(['Predicted', 'Actual', 'Delta {:0.5f}'.format(err_term)])
                plt.tight_layout()
                plt.title("Random Forest Regressor: {}".format(title_str))
                plt.show()
                plt.close()
            print(err_term, red_chi, err_sum, end=' ')

            if include_knn:
                knn_prediction = knn.predict(test_X)
                if not use_spca and not use_pca:
                    knn_predicted = ica.inverse_transform(knn_prediction, copy=True)
                else:
                    knn_predicted = np.zeros( (1, ica.components_.shape[1]) )
                    knn_predicted[0,:] = np.sum(knn_prediction.T * ica.components_, 0)

                if use_filter_split:
                    knn_e_prediction = knn_e.predict(test_X)
                    if not use_spca and not use_pca:
                        knn_e_predicted = ica_e.inverse_transform(knn_e_prediction, copy=True)
                    else:
                        knn_e_predicted = np.zeros( (1, ica_e.components_.shape[1]) )
                        knn_e_predicted[0,:] = np.sum(knn_e_prediction.T * ica_e.components_, 0)
                    knn_predicted = knn_predicted + knn_e_predicted

                if not no_plot:
                    plt.plot(c_wavelengths[~mask], knn_predicted[0][~mask])
                    plt.plot(c_wavelengths[~mask], actual[~mask])
                knn_delta = knn_predicted[0] - actual
                err_term = np.sum(np.power(knn_delta[~delta_mask], 2))/len(c_wavelengths[~delta_mask])
                err_sum = np.sum(knn_delta[~delta_mask])/len(knn_delta[~delta_mask])
                red_chi = np.sum(np.power(knn_delta[~delta_mask], 2)*ivar[~delta_mask])/(len(c_wavelengths[~delta_mask])-var_count-1)

                if not no_plot:
                    plt.plot(c_wavelengths[~mask], knn_delta[~mask])
                    plt.plot(c_wavelengths, [0]*len(c_wavelengths))
                    plt.legend(['Predicted', 'Actual', 'Delta {:0.5f}'.format(err_term)])
                    plt.tight_layout()
                    plt.title("Good 'ol K-NN: {}".format(title_str))
                    plt.show()
                    plt.close()
                print(err_term, red_chi, err_sum, end=' ')

        if include_linear:
            poly_1_prediction = linear.predict(test_X)
            if not use_spca and not use_pca:
                poly_1_predicted = ica.inverse_transform(poly_1_prediction, copy=True)
            else:
                poly_1_predicted = np.zeros( (1, ica.components_.shape[1]) )
                poly_1_predicted[0,:] = np.sum(poly_1_prediction.T * ica.components_, 0)

            if use_filter_split:
                poly_1_e_prediction = linear.predict(test_X)
                if not use_spca and not use_pca:
                    poly_1_e_predicted = ica_e.inverse_transform(poly_1_e_prediction, copy=True)
                else:
                    poly_1_e_predicted = np.zeros( (1, ica_e.components_.shape[1]) )
                    poly_1_e_predicted[0,:] = np.sum(poly_1_e_prediction.T * ica_e.components_, 0)
                poly_1_predicted = poly_1_predicted + poly_1_e_predicted

            poly_1_delta = poly_1_predicted[0] - actual

            if not no_plot:
                plt.plot(c_wavelengths[~mask], poly_1_predicted[0][~mask])
                plt.plot(c_wavelengths[~mask], actual[~mask])
            err_term = np.sum(np.power(poly_1_delta[~delta_mask], 2))/len(c_wavelengths[~delta_mask])
            err_sum = np.sum(poly_1_delta[~delta_mask])/len(poly_1_delta[~delta_mask])
            red_chi = np.sum(np.power(poly_1_delta[~delta_mask], 2)*ivar[~delta_mask])/(len(c_wavelengths[~delta_mask])-var_count-1)

            if not no_plot:
                plt.plot(c_wavelengths[~mask], poly_1_delta[~mask])
                plt.plot(c_wavelengths, [0]*len(c_wavelengths))
                plt.legend(['Predicted', 'Actual', 'Delta {:0.5f}'.format(err_term)])
                plt.tight_layout()
                plt.title("Poly 1: {}".format(title_str))
                plt.show()
                plt.close()

            print(err_term, red_chi, err_sum, end=' ')

            poly_2_prediction = poly_2_linear.predict(test_X)
            if not use_spca and not use_pca:
                poly_2_predicted = ica.inverse_transform(poly_2_prediction, copy=True)
            else:
                poly_2_predicted = np.zeros( (1, ica.components_.shape[1]) )
                poly_2_predicted[0,:] = np.sum(poly_2_prediction.T * ica.components_, 0)

            poly_2_delta = poly_2_predicted[0] - actual

            if not no_plot:
                plt.plot(c_wavelengths[~mask], poly_2_predicted[0][~mask])
                plt.plot(c_wavelengths[~mask], actual[~mask])
            err_term = np.sum(np.power(poly_2_delta[~delta_mask], 2))/len(c_wavelengths[~delta_mask])
            err_sum = np.sum(poly_2_delta[~delta_mask])/len(poly_2_delta[~delta_mask])
            red_chi = np.sum(np.power(poly_2_delta[~delta_mask], 2)*ivar[~delta_mask])/(len(c_wavelengths[~delta_mask])-var_count-1)

            if not no_plot:
                plt.plot(c_wavelengths[~mask], poly_2_delta[~mask])
                plt.plot(c_wavelengths, [0]*len(c_wavelengths))
                plt.legend(['Predicted', 'Actual', 'Delta {:0.5f}'.format(err_term)])
                plt.tight_layout()
                plt.title("Poly 2: {}".format(title_str))
                plt.show()
                plt.close()

            print(err_term, red_chi, err_sum, end=' ')
            err_ind += 1

            if order_3:
                poly_3_prediction = poly_3_linear.predict(test_X)
                if not use_spca and not use_pca:
                    poly_3_predicted = ica.inverse_transform(poly_3_prediction, copy=True)
                else:
                    poly_3_predicted = np.zeros( (1, ica.components_.shape[1]) )
                    poly_3_predicted[0,:] = np.sum(poly_3_prediction.T * ica.components_, 0)

                poly_3_delta = poly_3_predicted[0] - actual

                if not no_plot:
                    plt.plot(c_wavelengths[~mask], poly_3_predicted[0][~mask])
                    plt.plot(c_wavelengths[~mask], actual[~mask])
                err_term = np.sum(np.power(poly_3_delta[~delta_mask], 2))/len(c_wavelengths[~delta_mask])
                err_sum = np.sum(poly_3_delta[~delta_mask])/len(poly_3_delta[~delta_mask])
                red_chi = np.sum(np.power(poly_3_delta[~delta_mask], 2)*ivar[~delta_mask])/(len(c_wavelengths[~delta_mask])-var_count-1)

                if not no_plot:
                    plt.plot(c_wavelengths[~mask], poly_3_delta[~mask])
                    plt.plot(c_wavelengths, [0]*len(c_wavelengths))
                    plt.legend(['Predicted', 'Actual', 'Delta {:0.5f}'.format(err_term)])
                    plt.tight_layout()
                    plt.title("Poly 3: {}".format(title_str))
                    plt.show()
                    plt.close()

                print(err_term, red_chi, err_sum, end=' ')
                err_ind += 1

            if order_4:
                poly_4_prediction = poly_4_linear.predict(test_X)
                if not use_spca and not use_pca:
                    poly_4_predicted = ica.inverse_transform(poly_4_prediction, copy=True)
                else:
                    poly_4_predicted = np.zeros( (1, ica.components_.shape[1]) )
                    poly_4_predicted[0,:] = np.sum(poly_4_prediction.T * ica.components_, 0)

                poly_4_delta = poly_4_predicted[0] - actual

                if not no_plot:
                    plt.plot(c_wavelengths[~mask], poly_4_predicted[0][~mask])
                    plt.plot(c_wavelengths[~mask], actual[~mask])
                err_term = np.sum(np.power(poly_4_delta[~delta_mask], 2))/len(c_wavelengths[~delta_mask])
                err_sum = np.sum(poly_4_delta[~delta_mask])/len(poly_4_delta[~delta_mask])
                red_chi = np.sum(np.power(poly_4_delta[~delta_mask], 2)*ivar[~delta_mask])/(len(c_wavelengths[~delta_mask])-var_count-1)

                if not no_plot:
                    plt.plot(c_wavelengths[~mask], poly_4_delta[~mask])
                    plt.plot(c_wavelengths, [0]*len(c_wavelengths))
                    plt.legend(['Predicted', 'Actual', 'Delta {:0.5f}'.format(err_term)])
                    plt.tight_layout()
                    plt.title("Poly 4: {}".format(title_str))
                    plt.show()
                    plt.close()

                print(err_term, red_chi, err_sum, end=' ')
                err_ind += 1

        print()

        if save_out:
            out_table = Table()
            wavelength_col = Column(c_wavelengths, name="wavelength", dtype=float)
            out_table.add_columns([wavelength_col])

            if not linear_only:
                rf_col = Column(rfr_predicted[0], name="rf_flux", dtype=float)
                out_table.add_columns([rf_col])

                if include_knn:
                    knn_col = Column(knn_predicted[0], name="knn_flux", dtype=float)
                    avg_col = Column(avg_predicted[0], name="avg_flux", dtype=float)
                    out_table.add_columns([knn_col, avg_col])

            if include_linear:
                poly_1_col = Column(poly_1_predicted[0], name="poly_1_flux", dtype=float)
                poly_2_col = Column(poly_2_predicted[0], name="poly_2_flux", dtype=float)
                out_table.add_columns([poly_1_col, poly_2_col])
                if order_3:
                    poly_3_col = Column(poly_3_predicted[0], name="poly_3_flux", dtype=float)
                    out_table.add_columns([poly_3_col])
                if order_4:
                    poly_4_col = Column(poly_4_predicted[0], name="poly_4_flux", dtype=float)
                    out_table.add_columns([poly_4_col])

            mask_col = Column(~mask, name="mask_col", dtype=bool)
            out_table.add_columns([mask_col])

            out_table.write("predicted_sky_exp{}.csv".format(c_exposures[sorted_inds[test_ind]]), format="ascii.csv")
Beispiel #44
0
def bal_cluster(line, k, g):

    """ unsupervised clustering using KMeans.
    Parameters:
    line: which line to use for the clustering features
    k: number of clusters
    g: feature group; one of the following options:
    g0: normalized EW, Vmin, Vmax
    g1: normalized EW, Vmin, dV
    g2: normalized EW, Vmax, dV
    g3: normalized EW, Vmin, EW/dv
    g4: normalized EW, Vmax, EW/dv
    g5: normalized EW, Vmin, Vmax, dV
    g6: normalized EW, Vmin, Vmax, EW/dv
    g7: normalized Vmin, Vmax, EW/dV
    g8: normalized EW, dV, EW/dV
    g9: normalized EW, dV
    
    g11: normalized EW, Vmax, BI0/dV
    g12: normalized EW, Vmin, Vmax, BI0/dV
    g13: normalized BI0, Vmax, BI0/dV
    g14: normalized BI0, Vmin, Vmax, BI0/dV
    g15: normalized BI0, dV, Vmax
    g16: normalized BI0, EW, Vmin, Vmax, dV, BI0/dV
    g17: normalized BI0, EW, Vmin, Vmax, BI0/dV
    
    g18: normalized EW, Vmin, Vmax, dV/EW
    
    """
    
    clstr_name= line+str(k)


    data= Table.read('myBALs.fits')

    #data= Table.read('myBALCat_xtra.csv', format= 'ascii.csv')

    #select the sample: line has an absorption trough (BI0 > 0), S/N > 3, and a redshift cutoff to restrict the bandwidth

    if line == "MgII":
        z1= 1.1
        z2= 2.2
        lum= "logF2500"
    
    else:
        z1= 1.79
        z2= 3.7
        lum= "logF1400"


    s= data[(data[line+'-BIO'] >0) & (data['SN1700'] >3) & (data['Z_HW'] >z1) & (data['Z_HW'] <z2)]# & (data[lum] !=-999)]
    
    print "sample has", len(s), "objects"


    #features
    """
    redshift= s['Z_HW']
    bi= s[line+'-BIO']
    ew= s[line+'-EW']
    vmin= s[line+'-vmin']
    vmax= s[line+'-vmax']
    fdeep= s[line+'-fdeep']

    """
    s['Z_HW'].fill_value= -999
    redshift= s['Z_HW'].filled() #redshift

    s[line+'-BI'].fill_value= -999
    bi= s[line+'-BI'].filled() # balnicity: integration 3000-25000

    s[line+'-BIO'].fill_value= -999
    bi0= s[line+'-BIO'].filled() # modified balnicity: integration 0-25000

    s[line+'-EW'].fill_value= -999
    ew= s[line+'-EW'].filled() # restframe absorption EW

    s[line+'-vmin'].fill_value= -999
    vmin= s[line+'-vmin'].filled() # minimum velocity

    s[line+'-vmax'].fill_value= -999
    vmax= s[line+'-vmax'].filled() # maximum velocity

    s[line+'-fdeep'].fill_value= -999
    fdeep= s[line+'-fdeep'].filled()
    
    s[lum].fill_value= -999
    cl= s[lum].filled() # Log of 1400 or 2500 monochromatic luminosity

    s['SDSSName'].fill_value= -999
    names= s['SDSSName'].filled() # SDSS name
    
    dv= vmax- vmin # delta v
    
    dd= ew/dv # some sort of an estimate of the trough depth
    
    dp= bi0/dv # another dimensionless parameter to estimate trough depth
    
    dc4= dv/ew # another param to estimate the depth of CIV
    
    #standardize (normalized) parameters before using them in clustering
    ew_n= (ew - mean(ew))/std(ew)
    vmin_n= (vmin - mean(vmin))/std(vmin)
    vmax_n= (vmax - mean(vmax))/std(vmax)
    dv_n = (dv - mean(dv))/std(dv)
    dd_n= (dd- mean(dd))/std(dd)
    bi_n= (bi0 - mean(bi0))/std(bi0)
    dp_n= (dp - mean(dp))/std(dp)
    dc4_n= (dc4- mean(dc4))/std(dc4)
    
    #cl_n= (cl - mean(cl))/std(cl)


    if g==  'g0':
    
        f = [ew_n, vmin_n, vmax_n]
        colnames= ('EW', 'Vmin', 'Vmax')
        datatype= ('float64', 'float64', 'float64')

    elif g== 'g1':
        
        f = [ew_n, vmin_n, dv_n]
        colnames= ('EW', 'Vmin', 'dV')
        datatype= ('float64', 'float64', 'float64')

    elif g== 'g2':
    
        f = [ew_n, vmax_n, dv_n]
        colnames= ('EW', 'Vmax', 'dV')
        datatype= ('float64', 'float64', 'float64')

    elif g== 'g3':
        f= [ew_n, vmin_n, dd_n]
        colnames= ('EW', 'Vmin', 'EW_dV')
        datatype= ('float64', 'float64', 'float64')
    
    elif g== 'g4':
        f= [ew_n, vmax_n, dd_n]
        colnames= ('EW', 'Vmax', 'EW_dV')
        datatype= ('float64', 'float64', 'float64')

    elif g== 'g5':
        f = [ew_n, vmin_n, vmax_n, dv_n]
        colnames= ('EW', 'Vmin', 'Vmax', 'dV')
        datatype= ('float64', 'float64', 'float64', 'float64')
    
    elif g== 'g6':
        f= [ew_n, vmin_n, vmax_n, dd_n]
        colnames= ('EW', 'Vmin', 'Vmax', 'EW_dV')
        datatype= ('float64', 'float64', 'float64', 'float64')

    elif g== 'g7':
        f= [vmin_n, vmax_n, dd_n]
        colnames= ('Vmin', 'Vmax', 'EW_dV')
        datatype= ('float64', 'float64', 'float64')

    elif g== 'g8':
        f= [ew_n, dv_n, dd_n]
        colnames= ('EW', 'dV', 'EW_dV')
        datatype= ('float64', 'float64', 'float64')

    elif g== 'g9':
        f= [ew_n, dv_n]
        colnames= ('EW', 'dV')
        datatype= ('float64', 'float64')

    elif g== 'g11':
        f= [ew_n, vmax_n, dp_n]
        colnames= ('EW', 'Vmax', 'BI0_dV')
        datatype= ('float64', 'float64', 'float64')

    elif g== 'g12':
        f= [ew_n, vmin_n, vmax_n, dp_n]
        colnames= ('EW', 'Vmin', 'Vmax', 'BI0_dV')
        datatype= ('float64', 'float64', 'float64', 'float64')

    elif g== 'g13':
        f= [bi_n, vmax_n, dp_n]
        colnames= ('BI0', 'Vmax', 'BI0_dV')
        datatype= ('float64', 'float64', 'float64')

    elif g== 'g14':
        f= [bi_n, vmin_n, vmax_n, dp_n]
        colnames= ('BI0', 'Vmin', 'Vmax', 'BI0_dV')
        datatype= ('float64', 'float64', 'float64', 'float64')

    elif g== 'g15':
        f= [bi_n, vmax_n, dv_n]
        colnames= ('BI0', 'Vmax', 'dV')
        datatype= ('float64', 'float64', 'float64')

    elif g== 'g16':
        f= [bi_n, ew_n, vmin_n, vmax_n, dv_n, dp_n]
        colnames= ('BI0', 'EW', 'Vmin', 'Vmax', 'dV', 'BI0_dV')
        datatype= ('float64', 'float64', 'float64', 'float64', 'float64', 'float64')

    elif g== 'g17':
        f= [bi_n, ew_n, vmin_n, vmax_n, dp_n]
        colnames= ('BI0', 'EW', 'Vmin', 'Vmax', 'BI0_dV')
        datatype= ('float64', 'float64', 'float64', 'float64', 'float64')

    elif g== 'g18':
        f= [ew_n, vmin_n, vmax_n, dc4_n]
        colnames= ('EW', 'Vmin', 'Vmax', 'dCIV')
        datatype= ('float64', 'float64', 'float64', 'float64')


    qs= np.column_stack(f) # 2D array to do clustering on

    #do the clustering

    kmeans= KMeans(init= 'k-means++', n_clusters= k, n_init= 10)
    kmeans.fit(qs)
    labels= kmeans.predict(qs)
    sc= metrics.silhouette_score(qs, labels)
    cntrs= kmeans.cluster_centers_

    ## file to save centroids and silhouette scores for every run
    param_f= open("./clusters/"+g+"/"+clstr_name+"param.txt", 'w')

    param_f.write(g+", \t features: "+str(colnames)+ "\n")

    param_f.write("Silhouette score= "+ str(sc)+ "\n")

    param_f.write("centroids: "+"\n"+str(cntrs)+ "\n")

    param_f.close()

    print "Silhouette score= ", sc
    print "centroids: "+"\n", cntrs

    # save the results in a FITS table

    # table with clustering results
    clstr_tbl= Table([qs[:,b] for b in range(len(f))], names= colnames, dtype= datatype)

    print "table has", len(clstr_tbl), "objects"

    lab= Column(name= 'label', data= labels)

    clstr_tbl.add_columns([lab, names, redshift])

    clstr_tbl.write("./clusters/"+g+"/"+clstr_name+"clstrs.fits", format= 'fits')


    return
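
# Hedged usage sketch (not part of the original source): cluster the CIV
# troughs with feature set g5 (EW, Vmin, Vmax, dV) for a few values of k and
# compare the silhouette scores printed by bal_cluster.  Assumes 'myBALs.fits'
# and the ./clusters/g5/ output directory already exist.
for k in (3, 4, 5):
    bal_cluster('CIV', k, 'g5')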
Beispiel #45
0
from glob import glob
from os import path as pth
from astropy.io import fits, ascii
from astropy.table import Table

files = glob("wfc3_ir_f*_mjd_*_syn.fits")
output_format = "../TER_filter_{}.dat"

for fname in files:
    data = fits.getdata(fname)
    tbl = Table(data)
    tbl.columns[0].name = "wavelength"
    tbl.columns[1].name = "transmission"
    new_table = Table()
    new_table.add_columns([tbl["wavelength"], tbl["transmission"]])
    cmts = ["author: Kieran Leschinski",
            "source: https://stsynphot.readthedocs.io/en/latest/stsynphot/data_hst.html",
            "description: WFC3 IR Filter Curve",
            "date_created: 2019-11-19",
            "date_modified: 2019-11-19",
            "status: measured",
            "orig_filename: {}".format(fname),
            "wavelength_unit: Angstrom"]
    new_table.meta["comments"] = cmts
    new_table.write(output_format.format(fname[8:13].upper()),
                    format="ascii.fixed_width", overwrite=True, delimiter=None)
Beispiel #46
0
def create_young_pop(imf,a,difflen):
    #Remove last element of parameter of array as this is not an IMF parameter
   # a = a[0:len(a)-1]
    #Possible imf keywords: Chabrier-log ,Kroupa
    nsamp = 250
    #Full sample of objects should be same length as background-corrected observed: 11719
    nobjs = difflen
    t,e = [],[]
    full_m = np.linspace(0.07,0.4,nsamp)

    start = time.time()
    
    if 'chab' in imf:
        for i in range(nsamp):
    		#Generate FULL distribution of masses and probabilities, to a fine resolution: e = number of each mass, need to normalise?
            e.append(chab_log_imf(full_m[i],a))

    elif 'kroup' in imf:
        for i in range(nsamp):
            e.append(kroupa_imf(full_m[i],a))

    elif 'salp' in imf:
        for i in range(nsamp):
            e.append(salp_imf(full_m[i],a))

        #Masses of objects   
    e = [val/max(e) for val in e]

    young_obj_masses = choices(full_m,weights=e,k=nobjs)
    end = time.time()
    #print('Took',str(end-start))
    mass_col = Column(young_obj_masses,name='Mass(msol)')
    
    #Extinctions of objects: random choices from a distribution with weights
    #av_dist = Table.read('serpens_extinction_probabilities/ss_av_converted_prob_density_higher_conversion.txt',format='ascii')
    #av_list = choices(av_dist['Av Value'],weights=av_dist['Frequency'],k=nobjs)

    #Extinctions: random choices without weights
    #alist = np.arange(10.0,30.25,0.25)
    alist = np.arange(0.0,5.0,0.25)
    av_list = np.random.choice(alist,nobjs)
    #av_list = [0 for i in range(nobjs)]

    av_col = Column(av_list,name='RAv')
    
    t = Table()    
    #Option to have distance variation
    DIST = False

    if DIST:
    #Distances of objects - just use gaussian distribution for now
        dist = [438,11]
        dist_list = []
        for i in range(nsamp*100):
            dist_list.append(np.random.normal(dist[0],dist[1]))

        dhist,dvals = np.histogram(dist_list,bins=10000)
        dvals = dvals[0:len(dvals)-1]
        d_list = choices(dvals,weights=dhist,k=nobjs)
        d_col = Column(d_list,name='Dist(pc)')
        t.add_columns([mass_col,av_col,d_col])
    else:
        t.add_columns([mass_col,av_col])

    start = time.time()
    t = add_spt(t,'Msun','Mass(msol)','y')
    end = time.time()
    #print('Took',str(end-start))

    return t
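
# Standalone sketch (not the project's code) of the weighted-mass sampling used
# above: draw masses from a tabulated IMF with random.choices.  The power-law
# weight is only a stand-in for illustration; the real script uses
# chab_log_imf/kroupa_imf/salp_imf, which are defined elsewhere in the project.
import numpy as np
from random import choices
from astropy.table import Table, Column

full_m = np.linspace(0.07, 0.4, 250)
weights = full_m ** -2.35              # assumed Salpeter-like slope, illustration only
weights = weights / weights.max()
masses = choices(full_m, weights=list(weights), k=1000)
sketch = Table([Column(masses, name='Mass(msol)')])
print(len(sketch), sketch['Mass(msol)'].mean())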
Beispiel #47
0
def add_gaia(data):
    """ Add GAIA data to input structure, with 2MASS match and coordinate match to (cross-matched) GAIA reference file
    """

    # get the GAIA data from both matches
    gaia_twomass, gaia_posn = getdata(data)

    # add new columns
    tab = Table(data)
    in_names = ('source_id', 'parallax', 'parallax_error', 'pmra',
                'pmra_error', 'pmdec', 'pmdec_error', 'phot_g_mean_mag',
                'phot_bp_mean_mag', 'phot_rp_mean_mag', 'a_g_val',
                'e_bp_min_rp_val', 'radial_velocity', 'radial_velocity_error',
                'r_est', 'r_lo', 'r_hi')
    dtypes = ('i8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f4', 'f4', 'f4', 'f4',
              'f4', 'f8', 'f8', 'f8', 'f8', 'f8')
    out_names = []
    for name in in_names:
        out_names.append(('gaia_' + name).upper())
    # initialize
    newcols = Table(np.zeros([len(tab), len(out_names)]) - 9999.,
                    names=out_names,
                    dtype=dtypes)
    # for source_id, default to 0, not -9999.
    newcols['GAIA_SOURCE_ID'] = 0

    # rename targeting proper motions to avoid confusion!
    try:
        tab.rename_column('PMRA', 'TARG_PMRA')
    except:
        pass
    try:
        tab.rename_column('PMDEC', 'TARG_PMDEC')
    except:
        pass
    try:
        tab.rename_column('PM_SRC', 'TARG_PM_SRC')
    except:
        pass
    # add unpopulated columns
    tab.add_columns(newcols.columns.values())

    # remove dups in GAIA twomass in favor of brightest
    print('number in GAIA-2MASS xmatch catalog: ', len(gaia_twomass),
          len(set(gaia_twomass['original_ext_source_id'])))
    ind = []
    for tid in set(gaia_twomass['original_ext_source_id']):
        j = np.where(gaia_twomass['original_ext_source_id'] == tid)[0]
        if len(j) > 1:
            ii = np.argsort(gaia_twomass['phot_rp_mean_mag'][j])
            ind.append(j[ii[0]])
            print('duplicate 2MASS: ', gaia_twomass['phot_rp_mean_mag'][j[ii]])
        else:
            ind.append(j)

    # read gaia 2MASS matched file, match by 2MASS ID, and populate
    while True:
        # loop for matches since we may have repeats and want them all matched
        j = np.where(tab['GAIA_SOURCE_ID'] == 0)[0]
        print('Number missing gaia_source_id: ', len(j))
        if len(j) == 0: break
        m1, m2 = match.match(
            np.core.defchararray.replace(tab['APOGEE_ID'][j], b'2M', b''),
            gaia_twomass['original_ext_source_id'])
        print('Number matched by 2MASS: ', len(m1))
        if len(m1) == 0: break
        for inname, outname in zip(in_names, out_names):
            tab[outname][j[m1]] = gaia_twomass[inname][m2]

    j = np.where(tab['GAIA_SOURCE_ID'] > 0)[0]
    print('number of unique APOGEE_ID matches: ',
          len(set(tab['APOGEE_ID'][j])))

    j = np.where(tab['GAIA_SOURCE_ID'] == 0)[0]
    print('missing sources after 2MASS matches: ', len(j))
    h = htm.HTM()
    # now do a positional match, take the brightest object within 3 arcsec (which is the max from the GAIA crossmatch)
    maxrad = 3. / 3600.
    m1, m2, rad = h.match(tab['RA'][j],
                          tab['DEC'][j],
                          gaia_posn['ra'],
                          gaia_posn['dec'],
                          maxrad,
                          maxmatch=10)
    for m in set(m1):
        jj = np.where(m1 == m)[0]
        ii = np.argsort(gaia_posn['phot_rp_mean_mag'][m2[jj]])
        for inname, outname in zip(in_names, out_names):
            tab[outname][j[m]] = gaia_posn[inname][m2[jj[ii[0]]]]

    j = np.where(tab['GAIA_SOURCE_ID'] == 0)[0]
    print('missing sources after second match: ', len(j))

    # replace NaNs
    for name in out_names:
        bd = np.where(np.isnan(tab[name]))[0]
        tab[name][bd] = -9999.

    return tab
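
# Standalone sketch (not the project's code) of the "keep the brightest of the
# duplicates" step used above: for each repeated 2MASS ID, keep only the row
# with the smallest (brightest) phot_rp_mean_mag.  The arrays are made up for
# illustration.
import numpy as np

ids = np.array(['A', 'B', 'A', 'C', 'B'])
rp = np.array([12.1, 13.5, 11.8, 14.0, 13.2])
keep = []
for tid in set(ids):
    j = np.where(ids == tid)[0]
    keep.append(j[np.argmin(rp[j])])   # brightest entry wins
print(sorted(keep))                    # one surviving index per unique ID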
Beispiel #48
0
def unify_cluster_catalog_GMBCG(catalog, cosmo):
    """For a given catalog of GMBCG clusters, it unifies it to the
    required properties (RA, DEC, redshift, richness, Mass, etc) as
    used in our analysis. It returns a new catalog of only relevant
    information."""
    
    clusters = Table()
    
    #Unify name
    name = Column(name='NAME_ORIG',data=catalog['NAME'])
    clusters.add_column(name)
    
    #Unify ID, add column with original indices for hao
    objid = np.array([int(i+1) for i in range(len(catalog))])
    objid = Column(name = 'objid_gmbcg', data=objid)
    clusters.add_column(objid)
    
    #Unify RA/DEC
    ra  = Column(name='ra_d' ,data=catalog['RA'])
    dec = Column(name='dec_d',data=catalog['DEC'])
    clusters.add_columns([ra,dec])
    
    #unify best redshifts (spec-z when available, otherwise photo-z)
    z = np.where(catalog['SPZ']!=0,catalog['SPZ'],catalog['PHOTOZ'])
    z = Column(name = 'redshift', data=z)
    clusters.add_column(z)
    z_err = np.where(catalog['SPZ']!=0,0.0002,catalog['PHOTOZ_ERR'])
    z_err = Column(name='redshift_err',data=z_err)
    clusters.add_column(z_err)
    
    #add photometric and spectroscopic redshifts
    zsp = Column(name='zsp',data=np.where(catalog['SPZ']>0,catalog['SPZ'],-1))
    zph = Column(name='zph',data=catalog['PHOTOZ'])
    zph_err = Column(name='zph_err',data=catalog['PHOTOZ_ERR'])
    clusters.add_columns([zsp,zph,zph_err])
    
    #add flag_spec
    flag_zsp = (catalog['SPZ'] > 0)*1.
    flag_zsp = Column(name='flag_zsp',data=flag_zsp)
    clusters.add_column(flag_zsp)

    #create unified richness for hao
    richness = np.where(catalog['WEIGHTOK'],catalog['GM_NGALS_WEIGHTED'],catalog['GM_SCALED_NGALS'])
    richness = Column(name = 'richness', data=richness)
    clusters.add_column(richness)

    #add mass estimate
    #add column with cluster mass using calibration given by http://gmbcg.blogspot.com/
    #cluster mass = 8.8e13*((0.66*Ngals+9.567)/20.)^1.28 (h^-1 M_sun)
    mass = 8.8e13 * np.power((0.66 * clusters['richness'] + 9.567)/20, 1.28)/cosmo.h # in M_sun
    mass = Column(name = 'mass', data=mass)
    clusters.add_column(mass)
    
    #add column with r200 in Mpc
    rho_200 = 200 * cosmo.critical_density(clusters['redshift']).to('Msun/Mpc3') #in Msun/Mpc3
    mass_200 = clusters['mass']*u.Msun # in Msun
    r_200 = np.power(mass_200/rho_200/(4*np.pi/3.), 1/3.).value #in Mpc
    clusters['r200_mpc'] = r_200

    # give unified name
    name = [give_name(ra,dec) for ra,dec in zip(clusters['ra_d'],clusters['dec_d'])]
    clusters['name'] = name

    return clusters
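
# Standalone numerical sketch (assumed cosmology, richness and redshift, not
# catalog values) of the mass-richness calibration and r200 estimate used above.
import numpy as np
import astropy.units as u
from astropy.cosmology import FlatLambdaCDM

cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
richness, z = 25.0, 0.3
mass = 8.8e13 * ((0.66 * richness + 9.567) / 20.) ** 1.28 / cosmo.h     # M_sun
rho_200 = 200 * cosmo.critical_density(z).to('Msun/Mpc3')
r200 = ((mass * u.Msun) / rho_200 / (4 * np.pi / 3.)) ** (1. / 3.)
print(mass, r200.to('Mpc'))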
Beispiel #49
0
def fitsextract(input, header=None, stride=[1,1,1], keepref=True, keepnan=True, 
                zselect=None, col_lbl='imdat', ra_gc=None, dec_gc=None,
                pa=0, inc=0, ortlabel='default', bunit=None, first=False,
                use_hexgrid=False, sidelen=2, starting_angle=0, precision=2, 
                header_hex=None, hexgrid_output=None):

    """
    Sample data from an image into an AstroPy table indexed by coordinates.
    Simple approach taking every nth pixel along each image axis.
    Pseudocubes are handled as separate images and are detected by a blank 
    value for CTYPE3 in the header.

    Parameters
    ----------
    input : str or `~numpy.ndarray`
        The input data to reproject. This can be:
            * The name of a FITS file
            * A numpy array (in which case header must be separately given)
    header : `~astropy.io.fits.Header` object
        Header corresponding to the input array.  Must be provided if the
        input is a numpy array.
    stride : tuple of ints, optional
        step size to select pixels along each axis.  Axes are ordered using
        the FITS convention, not numpy convention (i.e. velaxis last).
        Default is [1,1,1] to keep all pixels.
        Note: stride in z is ignored for pseudocubes.
    keepref : bool, optional
        If dropping pixels, try to ensure that the reference pixel is kept.
        Default is True (keep the reference pixel).
    keepnan : bool, optional
        If False, the output table drops rows which are all-NaN.
        Default is True (keep the NaNs).
    zselect : list of ints, optional
        Indices of planes in cube/pseudocube to be selected.
        Default is to keep all planes.
    col_lbl : string or list of strings, optional
        Column label for the data values, can be list corresponding to each 
        plane to be selected, for CALIFA pseudocubes.
        Default is "imdat"+possible integer.
    ra_gc : float, optional
        Right ascension of galaxy center.  Used to determine
        polar coordinates of each sample in the plane of the galaxy.
        Default is reference RA value of the image.
    dec_gc : float, optional
        Declination of galaxy center.  Used to determine
        polar coordinates of each sample in the plane of the galaxy.
        Default is reference DEC value of the image.
    pa : float, optional
        Position angle of the galaxy disk in degrees E of N.  Used to determine
        polar coordinates of each sample in the plane of the galaxy.
        Default is 0 (due north).
    inc : float, optional
        Inclination of the galaxy disk in degrees from face-on.  Used to
        determine polar coordinates of each sample in the plane of the galaxy.
        Default is 0 (face-on).
    ortlabel : string, optional
        String label describing the origin of the orientation parameters.
    bunit : string or list of strings, optional
        Astropy units for data values, can be list corresponding to each plane
        to be selected, for CALIFA pseudocubes.
        Default is obtained from BUNIT in the header.
    first : bool, optional
        True to write the coordinate columns, which only need to be done once
        per table.  When combining multiple FITS images into a table, fitsextract
        should be called initially with first=True and then subsequently with
        first=False.
        Default is False.

    Returns
    -------
    tab : `~astropy.Table`
        The selected pixels as a 1-D table.
    """

    # Read the inputs
    if isinstance(input, np.ndarray):
        if header is None:
            raise TypeError("Header must be given if input is not FITS file")
        else:
            hdr = header
            data = input
    else:
        hdu   = fits.open(input)[0]
        data  = hdu.data
        hdr   = hdu.header if header is None else header
    bunit = hdr['BUNIT'] if bunit is None else bunit
    w     = WCS(hdr)
    print('RA ref is',w.wcs.crval[0])
    print('DEC ref is',w.wcs.crval[1])
    ndim  = len(data.shape)
    iscube = (ndim > 2 and data.shape[ndim-3] > 1)
    if 'CTYPE3' in hdr.keys():
        pseudo = (hdr['CTYPE3'] == '')
    else:
        pseudo = False

    # Create the coordinate columns.  First case is for PPV cubes.
    if iscube and not pseudo:
        print('This is a data cube of shape', data.shape)
        data = np.squeeze(data)
        if len(data.shape) > 3:
            raise ValueError('Data cannot be squeezed to three dimensions')
        naxis = 3
        nz,ny,nx = data.shape
        ix,iy,iz = np.meshgrid(np.arange(nx), np.arange(ny), np.arange(nz),
                               indexing='ij')
        tab = Table([np.ravel(ix), np.ravel(iy), np.ravel(iz)],
                  names=('ix','iy','iz'),dtype=('i4','i4','i4'))
        tab['ix'].description = '0-based pixel index in x direction'
        tab['iy'].description = '0-based pixel index in y direction'
        tab['iz'].description = '0-based pixel index in z direction'
        # Get the pixel coordinates as tuples
        wcsin = (np.array([tab['ix'],tab['iy'],tab['iz']])).T
    # Next case is for 2D images or pseudo-cubes
    else:
        print('This is an image of shape', data.shape)
        data = np.squeeze(data)
        naxis = 2
        if pseudo:
            nz,ny,nx = data.shape
        else:
            ny,nx = data.shape
        ix,iy = np.meshgrid(np.arange(nx), np.arange(ny), indexing='ij')
        tab = Table([np.ravel(ix), np.ravel(iy)],
                    names=('ix','iy'),dtype=('i4','i4'))
        tab['ix'].description = '0-based pixel index in x direction'
        tab['iy'].description = '0-based pixel index in y direction'
        # Get the pixel coordinates as tuples
        wcsin = (np.array([tab['ix'],tab['iy']])).T
    # Reduce WCS to 2-D for pseudocubes
    wfix = w.sub(naxis)
    if first:
        wcsout = wfix.wcs_pix2world(wcsin,0)
        col_ra = Column(wcsout.T[0], name='ra_abs',  dtype='f4', 
                        unit='deg', format='.6f', 
                        description='sample ra coord')
        col_dc = Column(wcsout.T[1], name='dec_abs', dtype='f4', 
                        unit='deg', format='.6f', 
                        description='sample dec coord')
        col_raoff = Column(wcsout.T[0]-w.wcs.crval[0], name='ra_off',  dtype='f4', 
                        unit='deg', format='.6f', 
                        description='ra offset from ref pixel')
        col_dcoff = Column(wcsout.T[1]-w.wcs.crval[1], name='dec_off', dtype='f4', 
                        unit='deg', format='.6f', 
                        description='dec offset from ref pixel')
        if ra_gc is None:
            ra_gc = w.wcs.crval[0]
        if dec_gc is None:
            dec_gc = w.wcs.crval[1]
        r, theta = gc_polr(wcsout.T[0], wcsout.T[1], ra_gc, dec_gc, pa, inc)
        col_r = Column(r, name='rad_arc', dtype='f4', unit='arcsec', format='.3f',
            description='radius based on {}'.format(ortlabel))
        col_th = Column(theta, name='azi_ang', dtype='f4', unit='deg', format='.3f',
            description='azang based on {}'.format(ortlabel))
        tab.add_columns([col_ra,col_dc,col_raoff,col_dcoff,col_r,col_th])
        if iscube and not pseudo:
            col_vel = Column(wcsout.T[2]/1000., name='vel', dtype='f4', 
                unit='km/s', description='velocity in LSRK frame using radio def')
            tab.add_column(col_vel)

    # Flatten the cube into a 1-D table
    # Use order = 'F' because the velocity axis is first
    # For pseudocubes, each plane becomes a separate column
    if pseudo:
        zsel = range(nz) if zselect is None else zselect
        if not isinstance(bunit, list):
            bunit = [bunit]*len(zsel)
        if not isinstance(col_lbl, list):
            col_lbl = [col_lbl+str(i) for i in range(len(zsel))]
        for iz, sel in enumerate(zsel):
            try:
                desc = hdr['DESC_'+str(sel)].strip()
            except KeyError:
                desc = ''
            col_data = Column(np.ravel(data[sel],order='F'), 
                              name=col_lbl[iz], dtype='f4', 
                              description=desc, unit=bunit[iz])
            tab.add_column(col_data)
    else:
        if isinstance(bunit, list):
            bunit = bunit[0]
        if isinstance(col_lbl, list):
            col_lbl = col_lbl[0]
        col_data = Column(np.ravel(data,order='F'), name=col_lbl, dtype='f4', unit=bunit)
        tab.add_column(col_data)

    idx = ['ix', 'iy', 'iz']
    rem = [0, 0, 0]
    select = [[],[],[]]
    if not use_hexgrid:
        # Use stride to select the desired rows from the full table
        if keepref:
            for i in range(naxis):
                crpix = wfix.wcs.crpix[i]
                if crpix < 1 or crpix > hdr['naxis'+str(i+1)] or not crpix.is_integer():
                    print('Cannot use keepref on axis {}: crpix={}'.format(i+1,crpix))
                    continue
                else:
                    print('Axis {}: crpix={}'.format(i+1,crpix))
                    rem[i] = int(crpix-1) % stride[i]
            print('Remainder: ',rem)
        for i in range(naxis):
            select[i]=np.where(tab[idx[i]] % stride[i] == rem[i])[0]
        xy = np.intersect1d(select[0], select[1])
        if iscube and not pseudo:
            xyz = np.intersect1d(xy, select[2])
            if len(xyz) < len(tab):
                newtab = tab[xyz]
                tab = newtab
        else:
            if len(xy) < len(tab):
                newtab = tab[xy]
                tab = newtab
    else:
        # print(f'reference pix is {wfix.wcs.crpix[:2]}')
        if iscube and not pseudo:
            iz_data = []
            tab_length = 0
            zlist = np.where(np.unique(tab['iz']) % stride[2] == rem[2])[0]
            for iz in zlist:
                if len(tab[tab['iz'] == iz]) == 0:
                    continue
                sample = hex_sampler(tab[tab['iz'] == iz], sidelen, keepref, wfix.wcs.crpix[:2] - 1., 
                                        w.wcs.crval[0], w.wcs.crval[1], ra_gc, dec_gc, pa, inc,
                                        starting_angle, precision, hexgrid_output)
                iz_data.append(sample)
                sample['iz'] = iz
                tab_length += len(sample)
            tab = tab[:tab_length]
            init = 0
            for tabs in iz_data:
                tab[init:(init+len(tabs))] = tabs
                init += len(tabs)
        else:
            sample = hex_sampler(tab, sidelen, keepref, wfix.wcs.crpix[:2] - 1., 
                                    w.wcs.crval[0], w.wcs.crval[1], ra_gc, dec_gc, pa, inc,
                                    starting_angle, precision, hexgrid_output)
            tab = sample
    # Remove NaN rows if desired
    if not keepnan:
        if not pseudo:
            newtab = tab[~np.isnan(tab[col_lbl])]
            tab = newtab
        else:
            df = tab.to_pandas()
            df = df.dropna(how='all', subset=col_lbl)
            tab = Table.from_pandas(df)
    return tab
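
# Hedged usage sketch (not from the original module): sample every 4th pixel of
# a 2-D moment-0 image into a table, keeping the reference pixel and dropping
# NaN rows.  'galaxy_mom0.fits' and the orientation values are placeholders,
# not files or parameters from this project.
# tab = fitsextract('galaxy_mom0.fits', stride=[4, 4, 1], keepref=True,
#                   keepnan=False, col_lbl='mom0', pa=35., inc=60., first=True)
# tab.write('galaxy_mom0_samples.ecsv', overwrite=True)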
Beispiel #50
0
filters = FilterSet()
filters.addEAZYFilter(eazy_library, range(len(eazy_library.filternames)))
Names = ['FUV', 'NUV', 'U', 'B', 'V', 'R', 'I', 'J', 'H', 'K']
synphot1 = Observe(SP1, filters, redshift=0.001)
synphot2 = Observe(SP2, filters, redshift=0.001)
fluxes1 = np.squeeze(synphot1.fluxes).value
fluxes2 = np.squeeze(synphot2.fluxes).value

data = Table()

for i, n in enumerate(Names):
    flux = np.array([])
    for k, w in enumerate(weight):
        flux = np.append(flux, (w * fluxes1[i] * Ms[sp1,:] + \
                                (1-w) * fluxes2[i] * Ms[sp2,:]).ravel())
    err = 0.01 * flux
    data.add_columns(
        [Column(flux, 'F%s' % (i + 1)),
         Column(err, 'E%s' % (i + 1))])

id = Column(name='id', data=np.arange(1, len(data) + 1))
zspec = Column(name='zspec', data=-1 * np.ones(len(data)))
data.add_column(id, 0)
data.add_column(zspec)

df = data.to_pandas()
np.savetxt('New/Composite/composite_AC.cat',
           df,
           header=' '.join(data.colnames),
           fmt=['%d'] + ['%.5e' for k in range(20)] + ['%.2f'])
Beispiel #51
0
def match_planted(fk_candidate_observations, match_filename, bright_limit=BRIGHT_LIMIT, object_planted=OBJECT_PLANTED,
                  minimum_bright_detections=MINIMUM_BRIGHT_DETECTIONS, bright_fraction=MINIMUM_BRIGHT_FRACTION):
    """
    Using the fk_candidate_observations as input get the Object.planted file from VOSpace and match
    planted sources with found sources.

    The Object.planted list is pulled from VOSpace based on the standard file-layout and name of the
    first exposure as read from the .astrom file.

    :param fk_candidate_observations: name of the fk*reals.astrom file to check against Object.planted
    :param match_filename: a file that will contain a list of all planted sources and the matched found source
    :param minimum_bright_detections: if there are too few bright detections we raise an error.

    """

    found_pos = []
    detections = fk_candidate_observations.get_sources()
    for detection in detections:
        reading = detection.get_reading(0)
        # create a list of positions, to be used later by match_lists
        found_pos.append([reading.x, reading.y])

    # Now get the Object.planted file, either from the local FS or from VOSpace.
    objects_planted_uri = object_planted
    if not os.access(objects_planted_uri, os.F_OK):
        objects_planted_uri = fk_candidate_observations.observations[0].get_object_planted_uri()
    lines = open(objects_planted_uri).read()

    # we are changing the format of the Object.planted header to be compatible with astropy.io.ascii but
    # there are some old Object.planted files out there so we do these string/replace calls to reset those.
    new_lines = lines.replace("pix rate", "pix_rate")
    new_lines = new_lines.replace("""''/h rate""", "sky_rate")
    planted_objects_table = ascii.read(new_lines, header_start=-1, data_start=0)
    planted_objects_table.meta = None
    # The match_list method expects a list that contains a position, not an x and a y vector, so we transpose.
    planted_pos = numpy.transpose([planted_objects_table['x'].data, planted_objects_table['y'].data])
    # match_idx is an order list.  The list is in the order of the first list of positions and each entry
    # is the index of the matching position from the second list.
    (match_idx, match_fnd) = util.match_lists(numpy.array(planted_pos), numpy.array(found_pos))
    assert isinstance(match_idx, numpy.ma.MaskedArray)
    assert isinstance(match_fnd, numpy.ma.MaskedArray)
    false_positives_table = Table()
    # Once we've matched the two lists we'll need some new columns to store the information in.
    # these are masked columns so that object.planted entries that have no detected match are left 'blank'.
    new_columns = [MaskedColumn(name="measure_x", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_y", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_rate", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_angle", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_mag1", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_merr1", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_mag2", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_merr2", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_mag3", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_merr3", length=len(planted_objects_table), mask=True)]
    planted_objects_table.add_columns(new_columns)
    tlength = 0
    new_columns = [MaskedColumn(name="measure_x", length=tlength, mask=True),
                   MaskedColumn(name="measure_y", length=tlength, mask=True),
                   MaskedColumn(name="measure_rate", length=tlength, mask=True),
                   MaskedColumn(name="measure_angle", length=tlength, mask=True),
                   MaskedColumn(name="measure_mag1", length=tlength, mask=True),
                   MaskedColumn(name="measure_merr1", length=tlength, mask=True),
                   MaskedColumn(name="measure_mag2", length=tlength, mask=True),
                   MaskedColumn(name="measure_merr2", length=tlength, mask=True),
                   MaskedColumn(name="measure_mag3", length=tlength, mask=True),
                   MaskedColumn(name="measure_merr3", length=tlength, mask=True)]
    false_positives_table.add_columns(new_columns)

    # We do some 'checks' on the Object.planted match to diagnose pipeline issues.  Those checks are made using just
    # those planted sources we should have detected.
    bright = planted_objects_table['mag'] < bright_limit
    n_bright_planted = numpy.count_nonzero(planted_objects_table['mag'][bright])

    measures = []
    idxs = []
    for idx in range(len(match_idx)):
        # The match_idx value is False if nothing was found.
        if not match_idx.mask[idx]:
            # Each 'source' has multiple 'readings'
            measures.append(detections[match_idx[idx]].get_readings())
            idxs.append(idx)

    observations = measure_mags(measures)

    for oidx in range(len(measures)):
        idx = idxs[oidx]
        readings = measures[oidx]
        start_jd = util.Time(readings[0].obs.header['MJD_OBS_CENTER'], format='mpc', scale='utc').jd
        end_jd = util.Time(readings[-1].obs.header['MJD_OBS_CENTER'], format='mpc', scale='utc').jd
        rate = math.sqrt((readings[-1].x - readings[0].x) ** 2 + (readings[-1].y - readings[0].y) ** 2) / (
            24 * (end_jd - start_jd))
        rate = int(rate * 100) / 100.0
        angle = math.degrees(math.atan2(readings[-1].y - readings[0].y, readings[-1].x - readings[0].x))
        angle = int(angle * 100) / 100.0
        planted_objects_table[idx]['measure_rate'] = rate
        planted_objects_table[idx]['measure_angle'] = angle
        planted_objects_table[idx]['measure_x'] = observations[readings[0].obs]['mags']["XCENTER"][oidx]
        planted_objects_table[idx]['measure_y'] = observations[readings[0].obs]['mags']["YCENTER"][oidx]
        for ridx in range(len(readings)):
            reading = readings[ridx]
            mags = observations[reading.obs]['mags']
            planted_objects_table[idx]['measure_mag{}'.format(ridx+1)] = mags["MAG"][oidx]
            planted_objects_table[idx]['measure_merr{}'.format(ridx+1)] = mags["MERR"][oidx]

    # for idx in range(len(match_fnd)):
    #     if match_fnd.mask[idx]:
    #         measures = detections[idx].get_readings()
    #         false_positives_table.add_row()
    #         false_positives_table[-1] = measure_mags(measures, false_positives_table[-1])

    # Count an object as detected if it has a measured magnitude in the first frame of the triplet.
    n_bright_found = numpy.count_nonzero(planted_objects_table['measure_mag1'][bright])
    # Also compute the offset and standard deviation of the measured magnitude from that planted ones.
    offset = numpy.mean(planted_objects_table['mag'][bright] - planted_objects_table['measure_mag1'][bright])
    try:
        offset = "{:5.2f}".format(offset)
    except:
        offset = "indef"

    std = numpy.std(planted_objects_table['mag'][bright] - planted_objects_table['measure_mag1'][bright])
    try:
        std = "{:5.2f}".format(std)
    except:
        std = "indef"

    if os.access(match_filename, os.R_OK):
        fout = open(match_filename, 'a')
    else:
        fout = open(match_filename, 'w')

    fout.write("#K {:10s} {:10s}\n".format("EXPNUM", "FWHM"))
    for measure in detections[0].get_readings():
        fout.write('#V {:10s} {:10s}\n'.format(measure.obs.header['EXPNUM'], measure.obs.header['FWHM']))

    fout.write("#K ")
    for keyword in ["RMIN", "RMAX", "ANGLE", "AWIDTH"]:
        fout.write("{:10s} ".format(keyword))
    fout.write("\n")

    fout.write("#V ")
    for keyword in ["RMIN", "RMAX", "ANGLE", "AWIDTH"]:
        fout.write("{:10s} ".format(fk_candidate_observations.sys_header[keyword]))
    fout.write("\n")

    fout.write("#K ")
    for keyword in ["NBRIGHT", "NFOUND", "OFFSET", "STDEV"]:
        fout.write("{:10s} ".format(keyword))
    fout.write("\n")
    fout.write("#V {:<10} {:<10} {:<10} {:<10}\n".format(n_bright_planted,
                                                         n_bright_found,
                                                         offset,
                                                         std))

    try:
        writer = ascii.FixedWidth
        # add a hash to the start of line that will have header columns: for JMP
        fout.write("#")
        fout.flush()
        ascii.write(planted_objects_table, output=fout, Writer=writer, delimiter=None)
        if len(false_positives_table) > 0:
            with open(match_filename+".fp", 'a') as fpout:
                fpout.write("#")
                ascii.write(false_positives_table, output=fpout, Writer=writer, delimiter=None)
    except Exception as e:
        logging.error(str(e))
        raise e
    finally:
        fout.close()

    # Some simple checks on how we did; raise an error if the pipeline clearly failed.
    if n_bright_planted < minimum_bright_detections:
        raise RuntimeError(1, "Too few bright objects planted.")

    if n_bright_found / float(n_bright_planted) < bright_fraction:
        raise RuntimeError(2, "Too few bright objects found.")

    return "{} {} {} {}".format(n_bright_planted, n_bright_found, offset, std)
Example #52
def make_sensitivity_curves(tint=1200, spec_res=100, aper_radius=0.15, seeing_limited=False):
    mag = np.arange(10, 22)
    snr_y = np.zeros(len(mag), dtype=float)
    snr_j = np.zeros(len(mag), dtype=float)
    snr_h = np.zeros(len(mag), dtype=float)
    snr_sum_y = np.zeros(len(mag), dtype=float)
    snr_sum_j = np.zeros(len(mag), dtype=float)
    snr_sum_h = np.zeros(len(mag), dtype=float)
    bkg_y = np.zeros(len(mag), dtype=float)
    bkg_j = np.zeros(len(mag), dtype=float)
    bkg_h = np.zeros(len(mag), dtype=float)
    star_y = np.zeros(len(mag), dtype=float)
    star_j = np.zeros(len(mag), dtype=float)
    star_h = np.zeros(len(mag), dtype=float)

    spec_y_tab = None
    spec_j_tab = None
    spec_h_tab = None

    # Calculate the number of supernovae.
    N_SNe = 4500.0 * 0.6 * 10 ** (mag - 18.9)
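    # For example, at mag = 18.9 the expression above evaluates to 4500.0 * 0.6 = 2700.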

    out_file = "roboAO_sensitivity_t{0:d}_R{1:d}_ap{2:0.3f}".format(tint, spec_res, aper_radius)

    # Save the summary output to a text file.
    _out = open(out_file + ".txt", "w")

    meta1 = '# tint = {0:5d}, R = {1:5d}, apRad = {2:5.3f}"\n'
    _out.write(meta1.format(tint, spec_res, aper_radius))
    _out.write("# Sensitivity integrated over broad band.")

    hdr = "{0:5s}  {1:6s}  {2:5s}  {3:5s}  {4:5s}  {5:5s}  {6:5s}  {7:5s}\n"
    fmt = "{0:5.1f}  {1:6.1f}  {2:5.1f}  {3:5.1f}  {4:5.1f}  {5:5.1f}  {6:5.1f}  {7:5.1f}\n"
    _out.write(hdr.format("# Mag", "N_SNe", "J_SNR", "H_SNR", "J_ms", "H_ms", "J_mb", "H_mb"))

    for mm in range(len(mag)):
        print "Mag: ", mag[mm]
        blah_y = etc_uh_roboAO(
            mag[mm], "Y", tint, spec_res=spec_res, aper_radius=aper_radius, seeing_limited=seeing_limited
        )
        blah_j = etc_uh_roboAO(
            mag[mm], "J", tint, spec_res=spec_res, aper_radius=aper_radius, seeing_limited=seeing_limited
        )
        blah_h = etc_uh_roboAO(
            mag[mm], "H", tint, spec_res=spec_res, aper_radius=aper_radius, seeing_limited=seeing_limited
        )

        col_y_suffix = "_Y_{0:d}".format(mag[mm])
        col_j_suffix = "_J_{0:d}".format(mag[mm])
        col_h_suffix = "_H_{0:d}".format(mag[mm])

        spec_signal_y = Column(name="sig" + col_y_suffix, data=blah_y[4])
        spec_signal_j = Column(name="sig" + col_j_suffix, data=blah_j[4])
        spec_signal_h = Column(name="sig" + col_h_suffix, data=blah_h[4])
        spec_bkg_y = Column(name="bkg" + col_y_suffix, data=blah_y[5])
        spec_bkg_j = Column(name="bkg" + col_j_suffix, data=blah_j[5])
        spec_bkg_h = Column(name="bkg" + col_h_suffix, data=blah_h[5])
        spec_snr_y = Column(name="snr" + col_y_suffix, data=blah_y[6])
        spec_snr_j = Column(name="snr" + col_j_suffix, data=blah_j[6])
        spec_snr_h = Column(name="snr" + col_h_suffix, data=blah_h[6])

        if spec_y_tab is None:
            spec_y_tab = Table([blah_y[3]], names=["wave_Y"])
        if spec_j_tab is None:
            spec_j_tab = Table([blah_j[3]], names=["wave_J"])
        if spec_h_tab is None:
            spec_h_tab = Table([blah_h[3]], names=["wave_H"])

        spec_y_tab.add_columns([spec_signal_y, spec_bkg_y, spec_snr_y])
        spec_j_tab.add_columns([spec_signal_j, spec_bkg_j, spec_snr_j])
        spec_h_tab.add_columns([spec_signal_h, spec_bkg_h, spec_snr_h])

        snr_y[mm] = blah_y[0]
        snr_j[mm] = blah_j[0]
        snr_h[mm] = blah_h[0]
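        # Band-integrated SNR: add the per-channel SNR spectrum in quadrature.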
        snr_sum_y[mm] = math.sqrt((spec_snr_y ** 2).sum())
        snr_sum_j[mm] = math.sqrt((spec_snr_j ** 2).sum())
        snr_sum_h[mm] = math.sqrt((spec_snr_h ** 2).sum())

        star_y[mm] = blah_y[1]
        star_j[mm] = blah_j[1]
        star_h[mm] = blah_h[1]
        bkg_y[mm] = blah_y[2]
        bkg_j[mm] = blah_j[2]
        bkg_h[mm] = blah_h[2]

        # Write this magnitude's summary row with the 'fmt' template defined above
        # (the column order is an assumption, matched to the header line written earlier).
        _out.write(fmt.format(mag[mm], N_SNe[mm], snr_j[mm], snr_h[mm],
                              star_j[mm], star_h[mm], bkg_j[mm], bkg_h[mm]))

    _out.close()

    avg_tab = Table(
        [mag, snr_y, snr_j, snr_h, snr_sum_y, snr_sum_j, snr_sum_h, star_y, star_j, star_h, bkg_y, bkg_j, bkg_h],
        names=[
            "mag",
            "snr_y",
            "snr_j",
            "snr_h",
            "snr_sum_y",
            "snr_sum_j",
            "snr_sum_h",
            "star_y",
            "star_j",
            "star_h",
            "bkg_y",
            "bkg_j",
            "bkg_h",
        ],
    )

    out_file = "roboAO_sensitivity_t{0:d}_R{1:d}_ap{2:0.3f}".format(tint, spec_res, aper_radius)

    if seeing_limited:
        out_file += "_seeing"

    # Save the tables
    spec_y_tab.write(out_file + "_spec_y_tab.fits", overwrite=True)
    spec_j_tab.write(out_file + "_spec_j_tab.fits", overwrite=True)
    spec_h_tab.write(out_file + "_spec_h_tab.fits", overwrite=True)
    avg_tab.write(out_file + "_avg_tab.fits", overwrite=True)

    return
Example #53
def make_tint_curves(mag=20, spec_res=100, aper_radius=0.15, seeing_limited=False):
    tint = np.arange(300, 3600 + 1, 300)
    snr_y = np.zeros(len(tint), dtype=float)
    snr_j = np.zeros(len(tint), dtype=float)
    snr_h = np.zeros(len(tint), dtype=float)

    snr_sum_y = np.zeros(len(tint), dtype=float)
    snr_sum_j = np.zeros(len(tint), dtype=float)
    snr_sum_h = np.zeros(len(tint), dtype=float)

    spec_y_tab = None
    spec_j_tab = None
    spec_h_tab = None

    for tt in range(len(tint)):
        print "Tint: ", tint[tt]
        blah_y = etc_uh_roboAO(
            mag, "Y", tint[tt], spec_res=spec_res, aper_radius=aper_radius, seeing_limited=seeing_limited
        )
        blah_j = etc_uh_roboAO(
            mag, "J", tint[tt], spec_res=spec_res, aper_radius=aper_radius, seeing_limited=seeing_limited
        )
        blah_h = etc_uh_roboAO(
            mag, "H", tint[tt], spec_res=spec_res, aper_radius=aper_radius, seeing_limited=seeing_limited
        )

        col_y_suffix = "_Y_{0:d}".format(tint[tt])
        col_j_suffix = "_J_{0:d}".format(tint[tt])
        col_h_suffix = "_H_{0:d}".format(tint[tt])

        spec_signal_y = Column(name="sig" + col_y_suffix, data=blah_y[4])
        spec_signal_j = Column(name="sig" + col_j_suffix, data=blah_j[4])
        spec_signal_h = Column(name="sig" + col_h_suffix, data=blah_h[4])
        spec_bkg_y = Column(name="bkg" + col_y_suffix, data=blah_y[5])
        spec_bkg_j = Column(name="bkg" + col_j_suffix, data=blah_j[5])
        spec_bkg_h = Column(name="bkg" + col_h_suffix, data=blah_h[5])
        spec_snr_y = Column(name="snr" + col_y_suffix, data=blah_y[6])
        spec_snr_j = Column(name="snr" + col_j_suffix, data=blah_j[6])
        spec_snr_h = Column(name="snr" + col_h_suffix, data=blah_h[6])

        if spec_y_tab is None:
            spec_y_tab = Table([blah_y[3]], names=["wave_Y"])
        if spec_j_tab is None:
            spec_j_tab = Table([blah_j[3]], names=["wave_J"])
        if spec_h_tab is None:
            spec_h_tab = Table([blah_h[3]], names=["wave_H"])

        spec_y_tab.add_columns([spec_signal_y, spec_bkg_y, spec_snr_y])
        spec_j_tab.add_columns([spec_signal_j, spec_bkg_j, spec_snr_j])
        spec_h_tab.add_columns([spec_signal_h, spec_bkg_h, spec_snr_h])

        snr_y[tt] = blah_y[0]
        snr_j[tt] = blah_j[0]
        snr_h[tt] = blah_h[0]

        snr_sum_y[tt] = math.sqrt((spec_snr_y ** 2).sum())
        snr_sum_j[tt] = math.sqrt((spec_snr_j ** 2).sum())
        snr_sum_h[tt] = math.sqrt((spec_snr_h ** 2).sum())

    avg_tab = Table(
        [tint, snr_y, snr_sum_y, snr_j, snr_sum_j, snr_h, snr_sum_h],
        names=["tint", "snr_y", "snr_sum_y", "snr_j", "snr_sum_j", "snr_h", "snr_sum_h"],
    )

    out_file = "roboAO_tint_m{0:d}_R{1:d}_ap{2:0.3f}".format(mag, spec_res, aper_radius)

    if seeing_limited:
        out_file += "_seeing"

    # Save the tables
    spec_y_tab.write(out_file + "_spec_y_tab.fits", overwrite=True)
    spec_j_tab.write(out_file + "_spec_j_tab.fits", overwrite=True)
    spec_h_tab.write(out_file + "_spec_h_tab.fits", overwrite=True)
    avg_tab.write(out_file + "_avg_tab.fits", overwrite=True)

    return
Example #54
                 unit='deg',
                 dtype=float)
t_orrerr = Column(name='e_original_RAJ2000',
                  data=np.array(original_rerrs),
                  description='Error on original right ascension of source',
                  unit='deg',
                  dtype=float)
t_orderr = Column(name='e_original_DECJ2000',
                  data=np.array(original_derrs),
                  description='Error on original declination of source',
                  unit='deg',
                  dtype=float)
##Add the columns

t.add_columns([
    t_name, t_base_name, t_upra, t_updec, t_uprerr, t_upderr, t_orra, t_ordec,
    t_orrerr, t_orderr
])

##Check to see if we have repeated frequencies between different catalogues
##If we do, we need to make unique names for the flux columns
all_freqs = []
for freqs in cat_freqs:
    for freq in freqs:
        all_freqs.append(freq)

if len(all_freqs) == len(np.unique(all_freqs)):
    duplicates = False
else:
    duplicates = True
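
##Illustrative sketch (an assumed approach, not taken from the code above): when duplicates
##is True, one way to keep flux column names unique is to tag each frequency with its
##catalogue name, assuming matched_cats lists the catalogue names in the same order as cat_freqs.
if duplicates:
    unique_flux_names = ['S_%s_%d' %(matched_cats[cat], int(freq))
                         for cat, freqs in enumerate(cat_freqs) for freq in freqs]
else:
    unique_flux_names = ['S_%d' %int(freq) for freqs in cat_freqs for freq in freqs]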

##For every catalogue in the match
Example #55
##Create many columns of data
t_name = Column(name='Name',data=names,description='Name based on position of combined source',dtype=str)
prim_names = [source.names[0] for source in sources]
t_base_name = Column(name='%s_name' %matched_cats[0],data=prim_names,description='Name of %s component' %matched_cats[0],dtype=str)
t_upra = Column(name='updated_RAJ2000',data=np.array(updated_ras),description='Updated right ascension of source',unit='deg',dtype=float)
t_updec = Column(name='updated_DECJ2000',data=np.array(updated_decs),description='Updated declination of source',unit='deg',dtype=float)
t_uprerr = Column(name='e_updated_RAJ2000',data=np.array(updated_rerrs),description='Error on updated right ascension of source',unit='deg',dtype=float)
t_upderr = Column(name='e_updated_DECJ2000',data=np.array(updated_derrs),description='Error on updated declination of source',unit='deg',dtype=float)
t_orra = Column(name='original_RAJ2000',data=np.array(original_ras),description='Original right ascension of source',unit='deg',dtype=float)
t_ordec = Column(name='original_DECJ2000',data=np.array(original_decs),description='Original declination of source',unit='deg',dtype=float)
t_orrerr = Column(name='e_original_RAJ2000',data=np.array(original_rerrs),description='Error on original right ascension of source',unit='deg',dtype=float)
t_orderr = Column(name='e_original_DECJ2000',data=np.array(original_derrs),description='Error on original declination of source',unit='deg',dtype=float)
##Add the columns

t.add_columns([t_name,t_base_name,t_upra,t_updec,t_uprerr,t_upderr,t_orra,t_ordec,t_orrerr,t_orderr])
		
##For every catalogue in the match
for cat in range(len(num_freqs)):
    ##See how many frequencies that source has
    num_freq = num_freqs[cat]
    ##For every frequency, make a column of fluxes and flux errors, masking every value with -100000.0
    for freq in range(num_freq):
        fluxs = np.array([src.fluxs[cat][freq] for src in sources])
        ferrs = np.array([src.ferrs[cat][freq] for src in sources])
        t_flux = MaskedColumn(name='S_%d' %int(cat_freqs[cat][freq]),data=fluxs,description='Flux at %.1fMHz' %float(cat_freqs[cat][freq]),mask=fluxs==-100000.0, fill_value=None,unit='Jy',dtype=float)
        t_ferr = MaskedColumn(name='e_S_%d' %int(cat_freqs[cat][freq]),data=ferrs,description='Flux error at %.1fMHz' %float(cat_freqs[cat][freq]),mask=ferrs==-100000.0, fill_value=None,unit='Jy',dtype=float)
        t.add_columns([t_flux,t_ferr])

##Extrapolate the base catalogue frequency flux density, using the fitted values for later comparison
extrap_freq = cat_freqs[0][0]
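
##Illustrative sketch (an assumed form of the fit referred to above): for a single power law
##S(nu) = S_0 * (nu / nu_0)**alpha, the flux density extrapolated to the base frequency would be
##    S_extrap = S_0 * (extrap_freq / nu_0)**alpha
##with S_0, nu_0 and alpha taken from the fitted values mentioned in the comment above.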