Example #1
    def test_array_wrap(self):
        """Test that the __array_wrap__ method converts a reduction ufunc
        output that has a different shape into an ndarray view.  Without this a
        method call like c.mean() returns a Column array object with length=1."""
        # Mean and sum for a 1-d float column
        c = table.Column(name='a', data=[1., 2., 3.])
        assert np.allclose(c.mean(), 2.0)
        assert isinstance(c.mean(), (np.floating, float))
        assert np.allclose(c.sum(), 6.)
        assert isinstance(c.sum(), (np.floating, float))

        # Non-reduction ufunc preserves Column class
        assert isinstance(np.cos(c), table.Column)

        # Sum for a 1-d int column
        c = table.Column(name='a', data=[1, 2, 3])
        assert np.allclose(c.sum(), 6)
        assert isinstance(c.sum(), (np.integer, int))

        # Sum for a 2-d int column
        c = table.Column(name='a', data=[[1, 2, 3], [4, 5, 6]])
        assert c.sum() == 21
        assert isinstance(c.sum(), (np.integer, int))
        assert np.all(c.sum(axis=0) == [5, 7, 9])
        assert c.sum(axis=0).shape == (3, )
        assert isinstance(c.sum(axis=0), np.ndarray)

        # Sum and mean for a 1-d masked column
        c = table.MaskedColumn(name='a', data=[1., 2., 3.], mask=[0, 0, 1])
        assert np.allclose(c.mean(), 1.5)
        assert isinstance(c.mean(), (np.floating, float))
        assert np.allclose(c.sum(), 3.)
        assert isinstance(c.sum(), (np.floating, float))
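
For reference, a minimal sketch of the mechanism this test exercises (illustrative only, not astropy's actual implementation): an ndarray subclass can use __array_wrap__ to demote reduction outputs, whose shape differs from the input's, back to a plain ndarray view.

import numpy as np

class DemotingArray(np.ndarray):
    """Toy subclass: reductions fall back to plain ndarray."""
    def __array_wrap__(self, out_arr, context=None, return_scalar=False):
        if out_arr.shape != self.shape:
            # A reduction changed the shape: hand back a bare ndarray view
            return out_arr.view(np.ndarray)
        return super().__array_wrap__(out_arr, context)

d = np.arange(6.).reshape(2, 3).view(DemotingArray)
assert isinstance(np.cos(d), DemotingArray)           # elementwise: class kept
assert not isinstance(d.sum(axis=0), DemotingArray)   # reduction: demoted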
Example #2
def parse_manifest(manifest):
    """
    Parse manifest and add back columns that are useful for TESS DV exploration.
    """
    results = deepcopy(manifest)
    filenames = []
    sector_range = []
    exts = []
    for f in manifest['Local Path']:
        file_parts = np.unique(f.split(sep='-'))  # np.unique already returns an array
        sectors = [part[0:2] == 's0' for part in file_parts]
        s1 = file_parts[sectors][0]
        try:
            s2 = file_parts[sectors][1]
        except IndexError:
            s2 = s1
        sector_range.append("%s-%s" % (s1, s2))
        path_parts = np.array(f.split(sep='/'))
        filenames.append(path_parts[-1])
        exts.append(path_parts[-1][-8:])

    results.add_column(table.Column(name="filename", data=filenames))
    results.add_column(table.Column(name="sectors", data=sector_range))
    results.add_column(table.Column(name="fileType", data=exts))
    results.add_column(
        table.Column(name="index", data=np.arange(0, len(manifest))))

    return results
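
A hypothetical usage sketch: the manifest is assumed to resemble an astroquery.mast download manifest, i.e. an astropy Table with a 'Local Path' column of TESS DV product paths (the paths below are invented).

import numpy as np
from copy import deepcopy
from astropy import table

manifest = table.Table({'Local Path': [
    './mastDownload/TESS/tess2019-s0014-s0014-0000000123456789_dvt.fits',
    './mastDownload/TESS/tess2019-s0014-s0026-0000000123456789_dvr.pdf',
]})
results = parse_manifest(manifest)
print(results['filename', 'sectors', 'fileType', 'index'])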
Example #3
    def compute_TDBs(self, method="astropy", ephem=None):
        """Compute and add TDB and TDB long double columns to the TOA table.
        This routine creates new columns 'tdb' and 'tdbld' in a TOA table
        for TDB times, using the Observatory locations and IERS A Earth
        rotation corrections for UT1.
        """
        log.info('Computing TDB columns.')
        if 'tdb' in self.table.colnames:
            log.info('tdb column already exists. Deleting...')
            self.table.remove_column('tdb')
        if 'tdbld' in self.table.colnames:
            log.info('tdbld column already exists. Deleting...')
            self.table.remove_column('tdbld')

        # Compute in observatory groups
        tdbs = numpy.zeros_like(self.table['mjd'])
        for ii, key in enumerate(self.table.groups.keys):
            grp = self.table.groups[ii]
            obs = self.table.groups.keys[ii]['obs']
            loind, hiind = self.table.groups.indices[ii:ii + 2]
            site = get_observatory(obs)
            grpmjds = time.Time(grp['mjd'], location=grp['mjd'][0].location)
            grptdbs = site.get_TDBs(grpmjds, method=method, ephem=ephem)
            tdbs[loind:hiind] = numpy.asarray([t for t in grptdbs])

        # Now add the new columns to the table
        col_tdb = table.Column(name='tdb', data=tdbs)
        col_tdbld = table.Column(
            name='tdbld', data=[utils.time_to_longdouble(t) for t in tdbs])
        self.table.add_columns([col_tdb, col_tdbld])
Example #4
    def make_spectra_table(nebins, spec_dict):
        """ Construct the spectral values table
        """
        from astropy import table

        col_masses = table.Column(name="ref_mass",
                                  dtype=float,
                                  unit="GeV",
                                  data=spec_dict['mass'])
        col_chans = table.Column(name="ref_chan",
                                 dtype=int,
                                 data=spec_dict['chan'])
        col_dnde = table.Column(name="ref_dnde",
                                dtype=float,
                                shape=nebins,
                                unit="ph / (MeV cm2 s)",
                                data=spec_dict['dnde'])
        col_flux = table.Column(name="ref_flux",
                                dtype=float,
                                shape=nebins,
                                unit="ph / (cm2 s)",
                                data=spec_dict['flux'])
        col_eflux = table.Column(name="ref_eflux",
                                 dtype=float,
                                 shape=nebins,
                                 unit="MeV / (cm2 s)",
                                 data=spec_dict['eflux'])

        # Avoid shadowing the astropy.table module imported above
        tab = table.Table(
            data=[col_masses, col_chans, col_dnde, col_flux, col_eflux])
        return tab
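
A hypothetical usage sketch (all values invented, and it assumes the method is reachable as a staticmethod or plain function): two masses with three energy bins each.

import numpy as np

nebins = 3
spec_dict = {'mass': np.array([10., 100.]),        # GeV
             'chan': np.array([4, 4]),             # channel codes
             'dnde': np.ones((2, nebins)) * 1e-9,  # ph / (MeV cm2 s)
             'flux': np.ones((2, nebins)) * 1e-7,  # ph / (cm2 s)
             'eflux': np.ones((2, nebins)) * 1e-4} # MeV / (cm2 s)
print(make_spectra_table(nebins, spec_dict))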
Example #5
def update_4F_stats():
    # this function updates the "FILTER" column on the 4F stats summaries
    suffixes = list(set(dict_suffix_rot.values()))
    for suf in suffixes:
        # Iterating through filters
        for f in filters:
            s_f = stats_dir + f'stats_{suf}_{f}.fits'
            print("Updating: ", s_f)
            stats = table.Table.read(s_f)
            # The image numbers embedded in each file name encode the filter
            # order; extract them from the "Image" column of this file.
            images = [re.search(f"Fld2/sta(.*){suf}", filt).group(1) for filt in stats["Image"]]
            # Find the dict_images key containing each image number, then take
            # the order tag from that key.
            f_ord = [next(key for key, value in dict_images.items() if (int(i) in value)).split("_")[1] for i in images]
            stats.replace_column('FILTER', table.Column(np.repeat(f, len(stats))))
            if 'F_ORD' not in stats.colnames:
                stats.add_column(table.Column(f_ord, name='F_ORD'))
            if 'wavelength' not in stats.colnames:
                stats.add_column(table.Column(np.repeat(util.get_wavelength(f), len(stats)), name='wavelength'))
            if 'quad' not in stats.colnames:
                stats.add_column(table.Column([util.get_quad(f, odr) for odr in f_ord], name='quad'))
            stats.write(s_f, overwrite=True)

    return
Example #6
def read_apertures(fname):
    a = []
    centers = []
    with open(fname, 'r') as f:
        for line in f:
            if ('begin' in line) or ('title' in line):
                a.append(line)
            elif 'center' in line:
                centers.append(float(line.split()[1]))

    column = np.unique(centers)

    t = table.Table([
        table.Column(name='line', dtype=float),
        table.Column(name='num', dtype=int),
        table.Column(name='bundle', dtype='S10'),
        table.Column(name='fiber', dtype=int),
    ])

    # Each aperture contributes two lines in `a` (the 'begin' line and the
    # 'title' line); cast fields to match the declared column dtypes
    for i in range(0, len(a), 2):
        t.add_row([
            float(a[i].split()[-1]),
            int(a[i].split()[-3]),
            a[i + 1].split()[-1].split('_')[0].strip(),
            int(a[i + 1].split()[-1].split('_')[1])])

    return t, column
Example #7
    def __init__(self,
                 x=[],
                 xmin=[],
                 xmax=[],
                 y=[],
                 dy=[],
                 xunit=au.nm,
                 yunit=au.erg / au.cm**2 / au.s / au.nm,
                 meta=None,
                 dtype=float):

        t = at.Table()
        t['x'] = at.Column(np.array(x, ndmin=1), dtype=dtype, unit=xunit)
        t['xmin'] = at.Column(np.array(xmin, ndmin=1), dtype=dtype, unit=xunit)
        t['xmax'] = at.Column(np.array(xmax, ndmin=1), dtype=dtype, unit=xunit)
        t['y'] = at.Column(np.array(y, ndmin=1), dtype=dtype, unit=yunit)
        t['dy'] = at.Column(np.array(dy, ndmin=1), dtype=dtype, unit=yunit)
        self._t = t
        self._xunit = xunit
        self._yunit = yunit
        # Use a fresh dict per instance; a mutable default would be shared
        self._meta = {} if meta is None else meta
        self._dtype = dtype
        self._rfz = 0.0

        self.x = au.Quantity(self._t['x'])
Example #8
    def make_ebounds_table(emin, emax, eref):
        """ Construct the energy bounds table

        Returns
        -------

        table : `astropy.table.Table`
            The table has these columns and one row per energy bin

        E_MIN : float
            Energy bin lower edge

        E_MAX : float
            Energy bin upper edge

        E_REF : float
            Reference energy for bin, typically geometric mean
            of bin edges

        """
        col_emin = table.Column(name="E_MIN",
                                dtype=float,
                                unit="MeV",
                                data=emin)
        col_emax = table.Column(name="E_MAX",
                                dtype=float,
                                unit="MeV",
                                data=emax)
        col_eref = table.Column(name="E_REF",
                                dtype=float,
                                unit="MeV",
                                data=eref)

        tab = table.Table(data=[col_emin, col_emax, col_eref])
        return tab
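
A brief usage sketch (invented values, same staticmethod caveat): three log-spaced bins, with E_REF taken as the geometric mean of the edges as the docstring suggests.

import numpy as np

emin = np.array([100., 1000., 10000.])     # MeV
emax = np.array([1000., 10000., 100000.])  # MeV
eref = np.sqrt(emin * emax)                # geometric mean of bin edges
print(make_ebounds_table(emin, emax, eref))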
Example #9
def addArtifactsToMask(regFile, surveyMask, wcs):
    """Given a .reg file of circular regions, make holes in surveyMask...
    
    """

    RAs = []
    decs = []
    rArcmin = []
    with open(regFile) as inFile:
        lines = inFile.readlines()
        for line in lines:
            if line[0] != "#" and line.find("circle") != -1:
                bits = line.split("(")[-1].split('")')[0].split(",")
                RAs.append(float(bits[0]))
                decs.append(float(bits[1]))
                rArcmin.append(float(bits[2]) / 60.)

    tab = atpy.Table()
    tab.add_column(atpy.Column(RAs, 'RADeg'))
    tab.add_column(atpy.Column(decs, 'decDeg'))
    tab.add_column(atpy.Column(rArcmin, 'rArcmin'))
    rArcminMap = np.ones(surveyMask.shape, dtype=float) * 1e6
    for count, row in enumerate(tab, start=1):
        print(count)
        x, y = wcs.wcs2pix(row['RADeg'], row['decDeg'])
        rArcminMap, xBounds, yBounds = nemoCython.makeDegreesDistanceMap(
            rArcminMap, wcs, row['RADeg'], row['decDeg'], row['rArcmin'] / 60.)
        rArcminMap = rArcminMap * 60  # convert degrees to arcmin
        surveyMask[rArcminMap < row['rArcmin']] = 0

    return surveyMask
Example #10
def find_std_zeropoint(file_dir, file_list, k, m_std, use_aperture, 
                       x=None, y=None, plot=True, fig_dir='./'):
    phot_tab_std = None
    for ifile in file_list: 
        filename = os.path.join(file_dir, ifile)
        xcen, ycen = find_obj_center(filename, x=x, y=y, plot=plot, fig_dir=fig_dir)
        aperture = np.arange(2, 20, 2)
        phot_tab_std_tmp, apertures_std = perform_aperture_photometry(xcen, ycen, filename, aperture_radii=aperture)
        airmass = fits.getval(filename, 'airmass', 0)
        phot_tab_std_tmp.add_column(table.Column(data=[ifile], name='filename'))
        phot_tab_std_tmp.add_column(table.Column(data=[airmass], name='airmass'))
        aper_indx = np.where(aperture==use_aperture)[0][0]
        N = phot_tab_std_tmp['aperture_sum_{}_bkg_sub'.format(aper_indx)]
        t = fits.getval(filename, 'exptime', 0)
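        # The constant 28 is a provisional zeropoint; it cancels in the
        # median for m_zpt below, so its exact value does not matter.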
        m_inst = 28 - 2.5*np.log10(N/t) - k*(airmass-1)
        phot_tab_std_tmp.add_column(table.Column(data=[m_inst], name='m_inst_{}'.format(aper_indx)))
        if phot_tab_std is not None:
            phot_tab_std = table.vstack((phot_tab_std, phot_tab_std_tmp))
        else:
            phot_tab_std = phot_tab_std_tmp
        if plot is True:
            plt.figure()
            for indx, aper in enumerate(apertures_std[:-1]):
                colname = 'aperture_sum_{}_bkg_sub'.format(indx)
                plt.plot(aper.r, phot_tab_std[-1][colname], 'o')
            plt.grid()

    m_zpt = np.median(m_std - phot_tab_std['m_inst_{}'.format(aper_indx)] + 28)  
    return m_zpt
Example #11
def test_latex_units():
    """
    Check to make sure that Latex and AASTex writers attempt to fall
    back on the **unit** attribute of **Column** if the supplied
    **latexdict** does not specify units.
    """
    t = table.Table([
        table.Column(name='date', data=['a', 'b']),
        table.Column(name='NUV exp.time', data=[1, 2])
    ])
    latexdict = copy.deepcopy(ascii.latexdicts['AA'])
    latexdict['units'] = {'NUV exp.time': 's'}
    out = StringIO()
    expected = '''\
\\begin{table}{cc}
\\tablehead{\\colhead{date} & \\colhead{NUV exp.time}\\\\ \\colhead{ } & \\colhead{s}}
\\startdata
a & 1 \\\\
b & 2
\\enddata
\\end{table}
'''.replace('\n', os.linesep)

    ascii.write(t, out, format='aastex', latexdict=latexdict)
    assert out.getvalue() == expected
    # use unit attribute instead
    t['NUV exp.time'].unit = u.s
    t['date'].unit = u.yr
    out = StringIO()
    ascii.write(t, out, format='aastex', latexdict=ascii.latexdicts['AA'])
    assert out.getvalue() == expected.replace(
        'colhead{s}',
        r'colhead{$\mathrm{s}$}').replace('colhead{ }',
                                          r'colhead{$\mathrm{yr}$}')
Example #12
    def get_daofind_table(self):
        tbl = Table.read(self._path_cache['daofind_output'], format='daophot')
        # Convert pixel coordinates into ra/dec
        ra, dec = self.pix2world(tbl['XCENTER'], tbl['YCENTER'], origin=1)
        ra_col = table.Column(name='ra', data=ra)
        dec_col = table.Column(name='dec', data=dec)
        tbl.add_columns([ra_col, dec_col])
        return tbl
Example #13
    def column_data(self):
        cols = [
            table.Column(name="ID", data=self.object_ids),
            table.Column(name="SFR",
                         data=np.random.lognormal(-0.6, 0.3, self.size)),
            table.Column(name="SFR_ERR",
                         data=np.random.normal(0, 0.1, self.size)**2)
        ]
        return cols
Example #14
    def column_data(self):
        cols = [
            table.Column(name="ID", data=self.object_ids),
            table.Column(name="StellarMass",
                         data=np.random.normal(10, 0.5, self.size)),
            table.Column(name="StellarMassError",
                         data=np.random.normal(0, 1, self.size)**2)
        ]
        return cols
Example #15
def add_w(t):
    ws = gen.map_np(gen.w, t["de_rad"], t["plx"], t["pm_ra"], t["pm_de"],
                    np.zeros(len(t)))
    t.add_column(table.Column(data=ws[:, 0, 0], name="w1", unit="km / s"))
    t.add_column(table.Column(data=ws[:, 1, 0], name="w2", unit="1 / yr"))
    t.add_column(table.Column(data=ws[:, 2, 0], name="w3", unit="1 / yr"))
    t["w1"].axis_label = "$w_1$"
    t["w2"].axis_label = "$w_2$"
    t["w3"].axis_label = "$w_3$"
Example #16
    def compute_TDBs(self, method="default", ephem=None):
        """Compute and add TDB and TDB long double columns to the TOA table.
        This routine creates new columns 'tdb' and 'tdbld' in a TOA table
        for TDB times, using the Observatory locations and IERS A Earth
        rotation corrections for UT1.
        """
        log.info('Computing TDB columns.')
        if 'tdb' in self.table.colnames:
            log.info('tdb column already exists. Deleting...')
            self.table.remove_column('tdb')
        if 'tdbld' in self.table.colnames:
            log.info('tdbld column already exists. Deleting...')
            self.table.remove_column('tdbld')

        if ephem is None:
            if self.ephem is not None:
                ephem = self.ephem
            else:
                log.warning('No ephemeris provided to TOAs object or compute_TDBs. Using DE421')
                ephem = 'DE421'

        # Compute in observatory groups
        tdbs = numpy.zeros_like(self.table['mjd'])
        for ii, key in enumerate(self.table.groups.keys):
            grp = self.table.groups[ii]
            obs = self.table.groups.keys[ii]['obs']
            loind, hiind = self.table.groups.indices[ii:ii+2]
            site = get_observatory(obs)
            if isinstance(site, TopoObs):
                # For TopoObs it is safe to assume that all TOAs in the group
                # share the same location.  Initializing a Time from a list
                # (or Column) of Times discards the location information, so
                # pass it back in explicitly.
                grpmjds = time.Time(grp['mjd'], location=grp['mjd'][0].location)
            else:
                # Otherwise, rebuild a single EarthLocation containing an
                # array of per-TOA locations from the individual
                # EarthLocation objects.
                loclist = [t.location for t in grp['mjd']]
                if loclist[0] is None:
                    grpmjds = time.Time(grp['mjd'], location=None)
                else:
                    locs = EarthLocation(numpy.array([l.x.value for l in loclist]) * u.m,
                                         numpy.array([l.y.value for l in loclist]) * u.m,
                                         numpy.array([l.z.value for l in loclist]) * u.m)
                    grpmjds = time.Time(grp['mjd'], location=locs)
            grptdbs = site.get_TDBs(grpmjds, method=method, ephem=ephem)
            tdbs[loind:hiind] = numpy.asarray([t for t in grptdbs])

        # Now add the new columns to the table
        col_tdb = table.Column(name='tdb', data=tdbs)
        col_tdbld = table.Column(name='tdbld',
                data=[utils.time_to_longdouble(t) for t in tdbs])
        self.table.add_columns([col_tdb, col_tdbld])
Example #17
def add_v_proj(t):
    ws = gen.XD_arr(t, "w1", "w2", "w3")
    vs = gen.map_np(gen.v_proj, ws, t["R^-1"])

    t.add_column(table.Column(data=vs[:, 0], name="v_x", unit="km / s"))
    t["v_x"].axis_label = "$v_x$"
    t.add_column(table.Column(data=vs[:, 1], name="v_y", unit="km / s"))
    t["v_y"].axis_label = "$v_y$"
    t.add_column(table.Column(data=vs[:, 2], name="v_z", unit="km / s"))
    t["v_z"].axis_label = "$v_z$"
Example #18
def test_getitem_metadata_regression():
    """
    Regression test for #1471: MaskedArray does not call __array_finalize__ so
    the meta-data was not getting copied over. By overloading _update_from we
    are able to work around this bug.
    """

    # Make sure that meta-data gets propagated with __getitem__

    c = table.Column(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8})
    assert c[1:2].name == 'a'
    assert c[1:2].description == 'b'
    assert c[1:2].unit == 'm'
    assert c[1:2].format == '%i'
    assert c[1:2].meta['c'] == 8

    c = table.MaskedColumn(data=[1, 2], name='a', description='b',
                           unit='m', format="%i", meta={'c': 8})
    assert c[1:2].name == 'a'
    assert c[1:2].description == 'b'
    assert c[1:2].unit == 'm'
    assert c[1:2].format == '%i'
    assert c[1:2].meta['c'] == 8

    # As above, but with take() - check the method and the function

    c = table.Column(data=[1, 2, 3], name='a', description='b',
                     unit='m', format="%i", meta={'c': 8})
    for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
        assert subset.name == 'a'
        assert subset.description == 'b'
        assert subset.unit == 'm'
        assert subset.format == '%i'
        assert subset.meta['c'] == 8

    # Metadata isn't copied for scalar values
    for subset in [c.take(0), np.take(c, 0)]:
        assert subset == 1
        assert subset.shape == ()
        assert not isinstance(subset, table.Column)

    c = table.MaskedColumn(data=[1, 2, 3], name='a', description='b',
                           unit='m', format="%i", meta={'c': 8})
    for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
        assert subset.name == 'a'
        assert subset.description == 'b'
        assert subset.unit == 'm'
        assert subset.format == '%i'
        assert subset.meta['c'] == 8

    # Metadata isn't copied for scalar values
    for subset in [c.take(0), np.take(c, 0)]:
        assert subset == 1
        assert subset.shape == ()
        assert not isinstance(subset, table.MaskedColumn)
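
For context, a minimal sketch of the workaround the docstring describes (illustrative only, not astropy's actual code): MaskedArray propagates attributes through _update_from rather than __array_finalize__, so overriding it lets a custom attribute survive slicing. The info_tag attribute is made up for this example.

import numpy as np

class TaggedMaskedArray(np.ma.MaskedArray):
    def _update_from(self, obj):
        # Called by MaskedArray on slicing/viewing; propagate our attribute
        super()._update_from(obj)
        self.info_tag = getattr(obj, 'info_tag', None)

m = TaggedMaskedArray([1, 2, 3], mask=[0, 0, 1])
m.info_tag = 'kept'
assert m[1:2].info_tag == 'kept'  # survives __getitem__ via _update_from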
Example #19
    def to_table(self):

        ks = self.subsample_keys
        tab = t.Table(rows=[self.FSPS_args])

        # make way for properly separated columns!
        for k in ks:
            del tab[k]

        # Fractions of stellar mass formed within several lookback times
        for age, name in [(.02, 'F_20M'), (.05, 'F_50M'), (.1, 'F_100M'),
                          (.2, 'F_200M'), (.5, 'F_500M'), (1, 'F_1G')]:
            tab.add_column(
                t.Column(data=[self.frac_mform_dt(age=age)], name=name))
        tab.add_column(t.Column(data=[self.mass_weighted_age], name='MWA'))
        tab.add_column(t.Column(data=[self.mstar], name='mstar'))

        tab = t.vstack([
            tab,
        ] * self.Nsubsample)

        for k in ks:
            tab.add_column(t.Column(data=self.FSPS_args[k], name=k))

        return tab
Example #20
def match_splat_barton(splat, barton):
    closest_inds = []
    for row in splat:
        closest = np.argmin(np.abs(barton['Freq']-row['Freq']))
        closest_inds.append(closest)

    barton_ = barton[np.array(closest_inds)]
    result = table.hstack([splat, barton_])
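    # hstack renamed the overlapping 'Freq' columns to Freq_1 (splat) and
    # Freq_2 (barton); 3e5 is c in km/s, expressing the offset as a velocity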
    result.add_column(table.Column(data=result['Freq_1'] - result['Freq_2'], name='Splat-Barton'))
    result.add_column(table.Column(data=(result['Freq_1'] - result['Freq_2'])/result['Freq_2']*3e5, name='Splat-Barton_kms'))
    return result
Example #21
    def __init__(self, num_spectra=-1):
        self.num_spectra = num_spectra
        self.np_spectrum = NpSpectrumContainer(readonly=False, num_spectra=num_spectra)
        self.continuum_fit_metadata = table.Table()
        self.continuum_fit_metadata.add_columns(
            [table.Column(name='index', dtype='i8', unit=None, length=num_spectra),
             table.Column(name='is_good_fit', dtype='b', unit=None, length=num_spectra),
             table.Column(name='goodness_of_fit', dtype='f8', unit=None, length=num_spectra),
             table.Column(name='snr', dtype='f8', unit=None, length=num_spectra)])

        # initialize array
        self.np_spectrum.zero()
Example #22
    def make_spectra_tables(nebins, spec_dict):
        """Construct the spectral values table

        Returns
        -------

        table : `astropy.table.Table`
            The table has these columns

        ref_<par_names> : float
            The value of other parameters (aside from the normalization)

        ref_spec : int
            The index of the stacking spectrum for this row

        ref_dnde : array
            The reference differential photon flux for each energy [ph / (MeV cm2 s)]

        ref_flux : array
            The reference integral photon flux for each energy [ph / (cm2 s)]

        ref_eflux : array
            The reference integral energy flux for each energy [MeV / (cm2 s)]

        """
        table_dict = {}
        for spec, spec_data in spec_dict.items():
            par_dict = spec_data['params']
            cols = []
            for par_name, par_value in par_dict.items():
                col_par = table.Column(name="par_%s" % par_name,
                                       dtype=float,
                                       data=par_value)
                cols.append(col_par)
            col_dnde = table.Column(name="ref_dnde",
                                    dtype=float,
                                    shape=nebins,
                                    unit="ph / (MeV cm2 s)",
                                    data=spec_data['dnde'])
            col_flux = table.Column(name="ref_flux",
                                    dtype=float,
                                    shape=nebins,
                                    unit="ph / (cm2 s)",
                                    data=spec_data['flux'])
            col_eflux = table.Column(name="ref_eflux",
                                     dtype=float,
                                     shape=nebins,
                                     unit="MeV / (cm2 s)",
                                     data=spec_data['eflux'])
            cols += [col_dnde, col_flux, col_eflux]
            otable = table.Table(data=cols)
            table_dict[spec] = otable
        return table_dict
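
A hypothetical usage sketch (values invented, same staticmethod caveat): one spectrum type with a single extra parameter and two energy bins.

import numpy as np

spec_dict = {'powerlaw': {'params': {'index': np.array([2.0])},
                          'dnde': np.array([[1e-9, 1e-11]]),
                          'flux': np.array([[1e-7, 1e-9]]),
                          'eflux': np.array([[1e-4, 1e-5]])}}
tables = make_spectra_tables(2, spec_dict)
print(tables['powerlaw'])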
Example #23
def add_w(t):
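    # gen.k is presumably the usual astrometric constant (~4.74) that turns
    # proper motion over parallax into a velocity in km/s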
    kplx = gen.k / t["plx"]
    w1 = np.zeros_like(t["v_r"])
    w2 = kplx * t["muphi*"]  # muphi* = muphi * cos theta
    w3 = kplx * t["mutheta"]

    t.add_column(table.Column(data=w1, name="w1", unit="km / s"))
    t["w1"].axis_label = r"$w_1$"
    t.add_column(table.Column(data=w2, name="w2", unit="1 / yr"))
    t["w2"].axis_label = r"$w_2$"
    t.add_column(table.Column(data=w3, name="w3", unit="1 / yr"))
    t["w3"].axis_label = r"$w_3$"
Example #24
def add_UVW(t,
            w_vec_col="w_vec",
            R_inv_col="R^-1",
            components=True,
            vector=True):
    UVWs = UVW_wR_many(t[w_vec_col], t[R_inv_col])
    if components:
        t.add_column(table.Column(data=UVWs[:, 0], name="U", unit="km / s"))
        t.add_column(table.Column(data=UVWs[:, 1], name="V", unit="km / s"))
        t.add_column(table.Column(data=UVWs[:, 2], name="W", unit="km / s"))
    if vector:
        t.add_column(table.Column(data=UVWs, name="UVW_vec", unit="km / s"))
Example #25
    def assign_cosmos_ids(self, method="random", randomseed=90000):
        ncosmos = len(self.cosmos_cat)
        np.random.seed(randomseed)
        i = np.random.randint(0, ncosmos, self.nobj)

        ids = self.cosmos_cat[i]["ident"]

        newcol1 = tb.Column(i, "cosmos_index")
        newcol2 = tb.Column(ids, "cosmos_ident")

        for b in self.bands:
            # getattr replaces the original Python 2 'exec' string hack
            band_cat = getattr(self, b)
            band_cat.add_column(newcol1)
            band_cat.add_column(newcol2)
Example #26
    def __read_spec_ws_ivar_from_fits(self,
                                      u_spec=1.e-17 *
                                      u.Unit('erg / (Angstrom cm2 s)'),
                                      u_ws=u.AA,
                                      wunit=True):
        """
		Params
		------
		self
		u_spec=1.e-17*u.Unit('erg / (Angstrom cm2 s)')
		u_ws=u.AA
		wunit=True

		Return
		------
		spec (nparray)
		ws (nparray)
		ivar (nparray)

		Default units
		-------------
		u_spec = 1.e-17*u.Unit('erg / (Angstrom cm2 s)')
		u_ws = u.AA
		"""
        fn = self.fp_spec
        if self.survey_spec in ['sdss', 'boss']:
            if os.path.isfile(fn):
                hdus = fits.open(fn)
            else:
                raise IOError("[Spector] spec fits file does not exist")

            spectable = hdus[1].data
            spec = spectable['flux']
            ws = 10.**spectable['loglam']
            ivar = spectable['ivar']

            if wunit:
                spec = spec * u_spec
                ws = ws * u_ws
                ivar = ivar / (u_spec**2)

            # instrument_header = at.Table(hdus[2].data)['INSTRUMENT'][0].lower()
            # if self.survey_spec != instrument_header:
            # 	self.survey_spec = instrument_header
            # 	print("[Spector] updating survey_spec to reflect instrument in spec.fits header -- {}".format(instrument_header))

            return (at.Column(spec, name='spec'),
                    at.Column(ws, name='ws'),
                    at.Column(ivar, name='ivar'))
        else:
            raise NameError("[Spector] survey_spec not recognized")
Example #27
def mixin_cols(request):
    """
    Fixture to return a set of columns for mixin testing which includes
    an index column 'i', two string cols 'a', 'b' (for joins etc), and
    one of the available mixin column types.
    """
    cols = OrderedDict()
    mixin_cols = deepcopy(MIXIN_COLS)
    cols['i'] = table.Column([0, 1, 2, 3], name='i')
    cols['a'] = table.Column(['a', 'b', 'b', 'c'], name='a')
    cols['b'] = table.Column(['b', 'c', 'a', 'd'], name='b')
    cols['m'] = mixin_cols[request.param]

    return cols
Example #28
def table_setup(f, first, output_table, cat_generated):
    if first:
        # One empty column per per-filter quantity
        for suffix in ('_mag_generated', '_mag_recovered',
                       '_subtraction_successful', '_difference',
                       '_matching_distance_arcsec',
                       '_nuclear_offset_pix_x', '_nuclear_offset_pix_y'):
            output_table.add_column(
                table.Column(name=f + suffix, length=len(output_table)))
        # Carry over the generated-catalogue columns, typed appropriately
        for col in cat_generated.colnames:
            if col not in ['type', 'source']:
                dtype = float
            else:
                dtype = 'S20'
            output_table.add_column(
                table.Column(name=f + '_' + col, length=len(output_table), dtype=dtype))

    return output_table
Example #29
    def test_col_and_masked_col(self):
        c1 = table.Column(name='a', dtype=int, unit='mJy', format='%i',
                          description='test column', meta={'c': 8, 'd': 12})
        c2 = table.MaskedColumn(name='a', dtype=int, unit='mJy', format='%i',
                                description='test column', meta={'c': 8, 'd': 12})
        assert c1.attrs_equal(c2)
        assert c2.attrs_equal(c1)
Example #30
def makeCombinedQTable(config):
    """Writes dictionary of tables (containing individual tile Q fits) as a single .fits table.
    
    Returns combined Q astropy table object
    
    """

    outFileName = config.selFnDir + os.path.sep + "QFit.fits"
    if os.path.exists(outFileName):
        return atpy.Table().read(outFileName)

    QTabDict = {}
    for tileName in config.allTileNames:
        QTabDict[tileName] = atpy.Table().read(config.selFnDir + os.path.sep +
                                               "QFit#%s.fits" % (tileName))

    combinedQTab = atpy.Table()
    for tabKey in list(QTabDict.keys()):
        for colKey in QTabDict[tabKey].keys():
            if colKey == 'theta500Arcmin':
                if colKey not in combinedQTab.keys():
                    combinedQTab.add_column(QTabDict[tabKey]['theta500Arcmin'],
                                            index=0)
            else:
                combinedQTab.add_column(
                    atpy.Column(QTabDict[tabKey][colKey].data, tabKey))
    combinedQTab.meta['NEMOVER'] = nemo.__version__
    combinedQTab.write(outFileName, overwrite=True)

    return combinedQTab