Example No. 1
def test_skycoord_mixin(tmpdir):

    t = table.Table()
    t['a'] = [1, 2]
    t['b'] = ['x', 'y']
    t['c'] = SkyCoord([1, 2], [3, 4],
                      unit='deg,deg',
                      frame='fk4',
                      obstime='J1990.5')

    def check(ff):
        assert isinstance(ff['table']['c'], SkyCoord)

    def tree_match(old, new):
        NDArrayType.assert_equal(new['a'], old['a'])
        NDArrayType.assert_equal(new['b'], old['b'])
        assert skycoord_equal(new['c'], old['c'])

    helpers.assert_roundtrip_tree({'table': t},
                                  tmpdir,
                                  asdf_check_func=check,
                                  tree_match_func=tree_match)
Example No. 2
def sextract(image,
             catalogName,
             flux_radii=[0.25, 0.5, 0.75],
             checkIm=None,
             checkImType=None):
    if checkIm is None:
        cmd = 'sex ' + image + ' -c chi2.sex -CATALOG_NAME ' + catalogName
    else:
        cmd = ('sex ' + image + ' -c chi2.sex -CATALOG_NAME ' + catalogName +
               ' -CHECKIMAGE_TYPE ' + checkImType +
               ' -CHECKIMAGE_NAME ' + checkIm + ' -PSF_NAME default.psf')
    os.system(cmd)

    # Expand FLUX_RADIUS column
    try:
        t_sex = table.Table(fits.getdata(catalogName, 1))
        for col, rad in enumerate(flux_radii):
            t_sex['FLUX_RADIUS_' + str(rad)] = np.array(
                t_sex['FLUX_RADIUS'])[:, col]
        t_sex.remove_column('FLUX_RADIUS')
        t_sex.write(catalogName, overwrite=True)
        return 0
    except KeyError:
        return 1
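# Usage sketch (hypothetical file names; assumes the SExtractor binary 'sex'
# and a chi2.sex configuration file are available in the working directory):
status = sextract('coadd.fits', 'coadd_cat.fits',
                  checkIm='seg.fits', checkImType='SEGMENTATION')
# status is 0 on success, or 1 if the catalog has no FLUX_RADIUS column.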
Example No. 3
def convertable(table):
    colnames = table.colnames
    if 'Uncertainty Integrated Flux' in colnames:
        colnames[2] = 'Unc. Int. Flux'
        table.rename_column('Uncertainty Integrated Flux', 'Unc. Int. Flux')
    for i in range(1, len(colnames)):
        firstentry = table[colnames[i]][0]
        if isinstance(firstentry, np.str_):
            continue
        column = table[colnames[i]].data.astype(float)
        ref = np.min(column)
        if ('RMS' in colnames[i]) or ('chi' in colnames[i]) or ('FWHM' in colnames[i]):
            for j in range(len(table)):
                table[colnames[i]][j] = round(table[colnames[i]][j], 2)
        elif (len(str(ref).split('.')[0]) > 3 and ref > 1) or \
                (len(str(ref).split('.')[1]) > 3 and ref < 1):
            refstr = '%.2E' % ref
            conv = 10.0 ** int(refstr.split('E')[1])  # avoid eval()
            for j in range(len(table)):
                table[colnames[i]][j] = round(table[colnames[i]][j] / conv, 2)
            colnames[i] += ' (x 10^%i)' % float(refstr.split('E')[1])
        elif ('Fitted' in colnames[i]):
            for j in range(len(table)):
                table[colnames[i]][j] = str(table[colnames[i]][j])  # np.unicode was removed in NumPy >= 1.20
        else:
            for j in range(len(table)):
                table[colnames[i]][j] = round(table[colnames[i]][j], 2)
    newtab = asttab.Table(names=colnames, data=table.as_array())
    if 'Fit ChiSquare' in colnames:
        newtab.rename_column('Fit ChiSquare', r'$\chi^2$')

    return newtab
Example No. 4
def crosscorr(lens,
              source,
              lensColumns=['RADeg', 'decDeg', 'redshift'],
              sourceColumns=['ra2000', 'decl2000'],
              binmax=5 * u.Mpc,
              cosmo=None):
    """ Cross-correlate lens and source tables
    create lens-source pairs table, returning for each  'id','ra','dec','e1','e2','sigma_e','zs','Pz','ra_l','dec_l','zl' """
    raL, decL, zL = lensColumns
    raS, decS = sourceColumns

    if cosmo is None:
        cosmo = FlatLambdaCDM(
            H0=100,
            Om0=0.27,
        )

    galcoords = SkyCoord(source[raS], source[decS], unit='deg')
    #source_stack = t.Table( data=None, names  = ('R','ra','dec','ra_l','dec_l','zl') )
    R = list()
    zl = list()
    for cluster in lens:
        #source_cl = t.Table( data=None, names  = ('R','ra','dec','ra_l','dec_l','zl') )
        cluster = np.array(cluster)
        z_cl = cluster[zL]
        clcenter = SkyCoord(cluster[raL], cluster[decL], unit='deg')
        Rarcmin = clcenter.separation(galcoords).to(u.arcmin)
        Rmpc = (Rarcmin * cosmo.kpc_comoving_per_arcmin(z_cl)).to(
            u.Mpc)  # in Mpc
        Iclgals = (Rmpc <= binmax)  # binmax in Mpc
        if len(np.where(Iclgals)[0]) == 0:
            print "no galaxies for this cluster at redshift: %.3f " % z_cl
            continue
        print "cluster redshift: %.3f " % z_cl
        R.extend(Rmpc[Iclgals].value)
        zl.extend(z_cl * np.ones(np.sum(Iclgals)))
    del source
    gc.collect()
    return t.Table([np.array(R), np.array(zl)], names=('R', 'zl'))
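# A minimal usage sketch (toy coordinates, purely illustrative; the column
# names are the function defaults, and `t` is the astropy.table alias used
# above):
lens_tab = t.Table({'RADeg': [150.1], 'decDeg': [2.2], 'redshift': [0.3]})
source_tab = t.Table({'ra2000': [150.11, 150.3], 'decl2000': [2.21, 2.4]})
pairs = crosscorr(lens_tab, source_tab)  # Table with columns 'R' (Mpc), 'zl'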
Example No. 5
def test_table_inline(tmpdir):
    data_rows = [(1, 2.0, 'x'),
                 (4, 5.0, 'y'),
                 (5, 8.2, 'z')]
    t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
                    dtype=('i4', 'f8', 'S1'))
    t.columns['a'].description = 'RA'
    t.columns['a'].unit = 'degree'
    t.columns['a'].meta = {'foo': 'bar'}
    t.columns['c'].description = 'Some description of some sort'

    def check(ff):
        assert len(list(ff.blocks.internal_blocks)) == 0

    if Version(asdf.__version__) >= Version('2.8.0'):
        # The auto_inline argument is deprecated as of asdf 2.8.0.
        with asdf.config_context() as config:
            config.array_inline_threshold = 64
            helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
    else:
        helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check,
                                      write_options={'auto_inline': 64})
Example No. 6
def create_m2fs_fiber_info_table(datapath, dataname, cams=['r', 'b']):
    datapathname = os.path.join(datapath, dataname)
    od = {
        'ID': [],
        'FIBNAME': [],
        'm2fs_fiberID': [],
        'm2fs_fab': [],
        'm2fs_CH': []
    }
    for cam in cams:
        fulldatapathname = datapathname.format(cam=cam)

        if not os.path.exists(fulldatapathname):
            print("Data not found at path: {}".format(fulldatapathname))
            continue

        hdr = fits.getheader(fulldatapathname, 1)
        fibs = [key for key in hdr.keys() if 'FIBER' in key]
        for fib in fibs:
            fib_id = hdr[fib]  # avoid shadowing the id() builtin
            if fib_id != 'unplugged' and len(hdr.comments[fib]) > 0:
                comm = hdr.comments[fib]
                fid, fab, ch = comm.split(' ')
                od['m2fs_fiberID'].append(fid.split('=')[1])
                od['m2fs_fab'].append(fab.split('=')[1])
                od['m2fs_CH'].append(ch.split('=')[1])
            else:
                od['m2fs_fiberID'].append('N/A')
                od['m2fs_fab'].append('N/A')
                od['m2fs_CH'].append('N/A')
            od['ID'].append(fib_id)
            od['FIBNAME'].append(fib.replace('FIBER', cam))

    if np.all(np.array(od['m2fs_fiberID']) == 'N/A'):
        od.pop('m2fs_fiberID')
        od.pop('m2fs_fab')
        od.pop('m2fs_CH')
    otab = table.Table(od)
    return otab
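# Usage sketch (hypothetical path template; '{cam}' is substituted with each
# camera letter in `cams`):
fiber_tab = create_m2fs_fiber_info_table('/data/m2fs', 'ut20_{cam}_stacked.fits')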
Example No. 7
    def table(self):
        bin_llims = np.meshgrid(*[bd.binbounds[:-1] for bd in self.bindefs],
                                indexing='ij')
        bin_ulims = np.meshgrid(*[bd.binbounds[1:] for bd in self.bindefs],
                                indexing='ij')
        bin_ix = np.meshgrid(*[range(s) for s in self.shape], indexing='ij')
        # 'ij' indexing keeps bin_ix aligned with the llims/ulims grids above

        tab = t.Table(data=[range(np.prod(self.shape))], names=['num'])
        llims_names = ['{}_llim'.format(bd.name) for bd in self.bindefs]
        ulims_names = ['{}_ulim'.format(bd.name) for bd in self.bindefs]
        ix_names = ['{}_ix'.format(bd.name) for bd in self.bindefs]

        for ll, lln, ul, uln, ix, ixn in zip(bin_llims, llims_names, bin_ulims,
                                             ulims_names, bin_ix, ix_names):

            tab[lln] = ll.flatten()
            tab[uln] = ul.flatten()
            tab[ixn] = ix.flatten()

        tab.add_index('num')

        return tab
Example No. 8
def bin(tab, bins):
    # define the bins and add columns
    bins = np.array(bins)
    nbins = len(bins) - 1
    bin_number = (tab['YEAR'] >= bins[:, None]).sum(axis=0) - 1
    keep = (bin_number >= 0) * (bin_number < nbins)
    tab = tab[keep]
    bin_number = bin_number[keep]
    bin = [f"{bins[b]}-{bins[b+1]-1}" for b in bin_number]
    binsize = bins[bin_number + 1] - bins[bin_number]
    tab.remove_column('YEAR')
    tab.add_column(bin_label, name='YEARS', index=1)
    tab.add_column(binsize, name='BINSIZE')
    # do the rebinning
    tab = tab.group_by(['LOCATION', 'YEARS'])
    cols = [
        average_column(c, tab['BINSIZE']) for c in tab.columns.values()
        if c.name != 'BINSIZE'
    ]
    names = [c for c in tab.colnames if c != 'BINSIZE']
    tab = table.Table(cols, names=names)
    return tab
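# `average_column` is not defined in this snippet. A minimal sketch of what it
# might look like, inferred only from how it is called above (the helper name
# and behavior are assumptions, not the original source):
def average_column(col, binsize):
    # Numeric columns collapse to their BINSIZE-weighted group averages;
    # other dtypes (e.g. the 'LOCATION' and 'YEARS' keys) keep the first
    # entry of each group.
    if col.dtype.kind in 'if':
        data = [np.average(g, weights=w)
                for g, w in zip(col.groups, binsize.groups)]
    else:
        data = [g[0] for g in col.groups]
    return table.Column(data, name=col.name)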
Example No. 9
    def __init__(self, model_grid):
        """Initialize an LDC object

        Parameters
        ----------
        model_grid: exoctk.modelgrid.ModelGrid
            The grid of synthetic spectra from which the coefficients will
            be calculated
        """
        # Set the model grid
        # if not isinstance(model_grid, modelgrid.ModelGrid):
        #     raise TypeError("'model_grid' must be a exoctk.modelgrid.ModelGrid object.")

        self.model_grid = model_grid

        # Table for results
        columns = [
            'Teff', 'logg', 'FeH', 'profile', 'filter', 'coeffs', 'errors',
            'wave', 'wave_min', 'wave_eff', 'wave_max', 'scaled_mu', 'raw_mu',
            'mu_min', 'scaled_ld', 'raw_ld', 'ld_min', 'ldfunc', 'flux',
            'bandpass'
        ]
        dtypes = [
            float, float, float, '|S20', '|S20', object, object, object,
            np.float16, np.float16, np.float16, object, object, np.float16,
            object, object, np.float16, object, object, object
        ]
        self.results = at.Table(names=columns, dtype=dtypes)

        self.ld_color = {
            'quadratic': 'blue',
            '4-parameter': 'red',
            'exponential': 'green',
            'linear': 'orange',
            'square-root': 'cyan',
            '3-parameter': 'magenta',
            'logarithmic': 'pink',
            'uniform': 'purple'
        }
Example No. 10
    def __init__(self, toafile=None, toalist=None):
        # First, just make an empty container
        self.toas = []
        self.commands = []
        self.filename = None
        self.planets = False
        self.ephem = None
        self.clock_corr_info = {}

        if (toalist is not None) and (toafile is not None):
            log.error('Cannot initialize TOAs from both file and list.')

        if toafile is not None:
            # FIXME: work with file-like objects as well
            # Check for a pickle-like filename.  Alternative approach would
            # be to just try opening it as a pickle and see what happens.
            if toafile.endswith('.pickle') or toafile.endswith('pickle.gz'):
                log.info('Reading TOAs from pickle file')
                self.read_pickle_file(toafile)
            else: # Not a pickle file, process as a standard set of TOA lines
                self.read_toa_file(toafile)
                self.filename = toafile

        if toalist is not None:
            if not isinstance(toalist, (list, tuple)):
                log.error('Trying to initialize TOAs from a non-list class')
            self.toas = toalist

        if not hasattr(self, 'table'):
            mjds = self.get_mjds(high_precision=True)
            # The table is grouped by observatory
            self.table = table.Table([numpy.arange(len(mjds)), mjds, self.get_mjds(),
                                      self.get_errors(), self.get_freqs(),
                                      self.get_obss(), self.get_flags()],
                                      names=("index", "mjd", "mjd_float", "error",
                                             "freq", "obs", "flags"),
                                      meta={'filename':self.filename}).group_by("obs")
        # We don't need this now that we have a table
        del self.toas
Example No. 11
def view_log(database, table, limit=50):
    """Visually inspect the job log.

    Parameters
    ----------
    database : str or ``sqlite3.connection.cursor`` obj
        The database cursor object
    table : str
        The table name
    limit : int
        The number of records to show

    Returns
    -------
    table : ``astropy.Table`` obj
        An astropy.table object containing the results.

    """

    if isinstance(database, str):
        DB = load_db(database)
    elif isinstance(database, sqlite3.Cursor):
        DB = database
    else:
        # raise instead of printing: DB would otherwise be unbound below
        raise TypeError("Please enter the path to a .db file or a sqlite3.Cursor object.")

    # Query the database
    colnames = np.array(DB.execute("PRAGMA table_info('{}')".format(table)).fetchall()).T[1]
    results = DB.execute("SELECT * FROM {} LIMIT {}".format(table, limit)).fetchall()

    # Empty table
    table = at.Table(names=colnames, dtype=['O'] * len(colnames))

    # Add the results
    if len(results) > 0:
        for row in results:
            table.add_row(row)

    return table
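# Usage sketch (hypothetical database file and table name):
log_tab = view_log('/path/to/jobs.db', 'job_log', limit=10)
log_tab.pprint()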
Example No. 12
def fits2csv(fits_name):
    import numpy as np
    import pandas as pd
    from astropy import table
    from astropy.io import fits
    import healpy.pixelfunc as pf

    hdulist = fits.open(fits_name)
    tbdata = hdulist[1].data
    ra = tbdata['RA']
    dec = tbdata['DEC']
    z = tbdata['Z']
    absmag = tbdata['AMAG']
    appmag = tbdata['OMAG']
    d = {
        'zobs': z.astype('float64'),
        'ra': ra.astype('float64'),
        'dec': dec.astype('float64'),
        'u_ab': absmag[:, 0].astype('float64'),
        'g_ab': absmag[:, 1].astype('float64'),
        'r_ab': absmag[:, 2].astype('float64'),
        'i_ab': absmag[:, 3].astype('float64'),
        'z_ab': absmag[:, 4].astype('float64'),
        'u_ap': appmag[:, 0].astype('float64'),
        'g_ap': appmag[:, 1].astype('float64'),
        'r_ap': appmag[:, 2].astype('float64'),
        'i_ap': appmag[:, 3].astype('float64'),
        'z_ap': appmag[:, 4].astype('float64')
    }
    fit_table = table.Table(d)
    data = fit_table.to_pandas()
    hdulist.close()
    data['big_pix'] = pf.ang2pix(32,
                                 theta=data['ra'],
                                 phi=data['dec'],
                                 lonlat=True,
                                 nest=False)
    return data
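# Usage sketch (hypothetical mock-catalog file):
df = fits2csv('mock_galaxies.fits')
df.to_csv('mock_galaxies.csv', index=False)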
Example No. 13
def _add_aperture_table(ad, center):
    """
    Adds a fake aperture table to the `AstroData` object.

    Parameters
    ----------
    ad : AstroData
    center : int

    Returns
    -------
    AstroData : the input data with an `.APERTURE` table attached to it.
    """
    width = ad[0].shape[1]

    aperture = table.Table(
        [[1],  # Number
         [1],  # ndim
         [0],  # degree
         [0],  # domain_start
         [width - 1],  # domain_end
         [center],  # c0
         [-5],  # aper_lower
         [5],  # aper_upper
         ],
        names=[
            'number',
            'ndim',
            'degree',
            'domain_start',
            'domain_end',
            'c0',
            'aper_lower',
            'aper_upper']
    )

    ad[0].APERTURE = aperture
    return ad
Example No. 14
def test_string_truncation_warning(masked):
    """
    Test warnings associated with in-place assignment to a string
    column that results in truncation of the right hand side.
    """
    from inspect import currentframe, getframeinfo

    t = table.Table([['aa', 'bb']], names=['a'], masked=masked)
    t['a'][1] = 'cc'
    t['a'][:] = 'dd'

    with pytest.warns(table.StringTruncateWarning,
                      match=r'truncated right side '
                      r'string\(s\) longer than 2 character\(s\)') as w:
        frameinfo = getframeinfo(currentframe())
        t['a'][0] = 'eee'  # replace item with string that gets truncated
    assert t['a'][0] == 'ee'
    assert len(w) == 1

    # Make sure the warning points back to the user code line
    assert w[0].lineno == frameinfo.lineno + 1
    assert 'test_column' in w[0].filename

    with pytest.warns(table.StringTruncateWarning,
                      match=r'truncated right side '
                      r'string\(s\) longer than 2 character\(s\)') as w:
        t['a'][:] = ['ff',
                     'ggg']  # replace item with string that gets truncated
    assert np.all(t['a'] == ['ff', 'gg'])
    assert len(w) == 1

    # Test the obscure case of assigning from an array that was originally
    # wider than any of the current elements (i.e. dtype is U4 but actual
    # elements are U1 at the time of assignment).
    val = np.array(['ffff', 'gggg'])
    val[:] = ['f', 'g']
    t['a'][:] = val
    assert np.all(t['a'] == ['f', 'g'])
Example No. 15
    def setup_south2008(self):
        """Set-up for tests that use southern 2008 ACT data - downloads needed files from LAMBDA if not 
        found.
        
        """

        thisDir = os.getcwd()
        self.inMapFileName = self.cacheDir + os.path.sep + "ACT_148_south_season_2_1way_v3_summed.fits"
        if not os.path.exists(self.inMapFileName):
            print(">>> Downloading South 2008 data ...")
            os.chdir(self.cacheDir)
            os.system(
                "wget https://lambda.gsfc.nasa.gov/data/suborbital/ACT/data2013/Maps/AR1/South/ACT_148_south_season_2_1way_v3_summed.fits"
            )
            os.system(
                "wget https://lambda.gsfc.nasa.gov/data/suborbital/ACT/data2013/Weights/AR1/South/ACT_148_south_season_2_1way_hits_v3.fits"
            )
            os.system(
                "wget https://lambda.gsfc.nasa.gov/data/suborbital/ACT/data2013/Beams/profiles/profile_AR1_2008_pixwin_130224.txt"
            )
            os.system(
                "wget https://lambda.gsfc.nasa.gov/data/suborbital/ACT/Cluster_src/Ptsrc_cat/act_source_catalog_AR1_2008.txt"
            )
            os.chdir(thisDir)

        # Need to convert published catalog such that comparison routines work
        # NOTE: loading the table this way breaks the name column but we soldier on...
        tabFileName = self.cacheDir + os.path.sep + "act_source_catalog_AR1_2008.fits"
        if not os.path.exists(tabFileName):
            tab = atpy.Table().read(self.cacheDir + os.path.sep +
                                    "act_source_catalog_AR1_2008.txt",
                                    format='ascii')
            tab.rename_column("col2", "name")
            tab.rename_column("col3", "RADeg")
            tab.rename_column("col4", "decDeg")
            tab.rename_column("col6", "fluxJy")
            tab['fluxJy'] = tab['fluxJy'] / 1000.0
            tab.write(tabFileName)
Example No. 16
def test_multidim_column_error(fmt_name_class):
    """
    Test that trying to write a multidim column fails in every format except
    ECSV.
    """
    fmt_name, fmt_cls = fmt_name_class

    if not getattr(fmt_cls, '_io_registry_can_write', True):
        return

    # Skip tests for ecsv or HTML without bs4. See the comment in latex.py
    # Latex class where max_ndim = None is defined regarding latex and aastex.
    if ((fmt_name == 'html' and not HAS_BS4)
            or fmt_name in ('ecsv', 'latex', 'aastex')):
        return

    out = StringIO()
    t = table.Table()
    t['a'] = np.arange(16).reshape(2, 2, 2, 2)
    t['b'] = [1, 2]
    fast = fmt_name in ascii.core.FAST_CLASSES
    with pytest.raises(ValueError, match=r'column\(s\) with dimension'):
        ascii.write(t, out, format=fmt_name, fast_writer=fast)
Example No. 17
def test_uint_indexing():
    """
    Test that accessing a row with an unsigned integer
    works as with a signed integer.  Similarly tests
    that printing such a row works.

    This is non-trivial: adding a signed and unsigned
    integer in numpy results in a float, which is an
    invalid slice index.

    Regression test for gh-7464.
    """
    t = table.Table([[1., 2., 3.]], names='a')
    assert t['a'][1] == 2.
    assert t['a'][np.int_(1)] == 2.
    assert t['a'][np.uint(1)] == 2.
    assert t[np.uint(1)]['a'] == 2.

    trepr = ['<Row index=1>', '   a   ', 'float64', '-------', '    2.0']

    assert repr(t[1]).splitlines() == trepr
    assert repr(t[np.int_(1)]).splitlines() == trepr
    assert repr(t[np.uint(1)]).splitlines() == trepr
Example No. 18
    def tbl_to_astropy(tbl):
        typedict = { "int": "i4", \
                         "long": "i8", \
                         "float": "f4", \
                         "double": "f8", \
                         "real": "f8", \
                         "char": "S", \
                         "date": "S" }

        collist = []
        for n in tbl.colnames:
            col = tbl.cols[n]
            longtype = IPACExpandType(col.type)

            m = [not x for x in col.mask]
            newC = aptb.MaskedColumn(col.data, name=n,
                                     mask=m, dtype=typedict[longtype])

            collist.append(newC)

        newT = aptb.Table(collist)

        return newT
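# `IPACExpandType` is not shown in this snippet. A plausible sketch, assuming
# it expands one-letter IPAC column type codes into the long names keyed in
# `typedict` (the mapping here is an assumption, not the original helper):
def IPACExpandType(coltype):
    short = {'i': 'int', 'l': 'long', 'f': 'float',
             'd': 'double', 'r': 'real', 'c': 'char'}
    if len(coltype) == 1:  # one-letter short code
        return short.get(coltype.lower(), coltype)
    return coltype  # already a long name such as 'double' or 'date'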
Example No. 19
def sort_raw_data(images, min_visit_separation=0.2):
    metadata = table.Table()
    metadata['filename'] = images
    metadata['expstart'] = [fits.getval(image, 'EXPSTART') for image in images]
    metadata['instrument'] = [
        fits_utils.get_instrument(image) for image in images
    ]
    metadata['visit'] = 'visit1'  # initialize the column
    metadata['filter'] = [get_filter_name(image) for image in images]
    metadata.sort('expstart')
    visit_starts, = np.where(
        np.diff(metadata['expstart']) > min_visit_separation)
    for visit_num, first_row in enumerate(visit_starts):
        metadata['visit'][first_row + 1:] = 'visit{:d}'.format(visit_num + 2)

    for image in metadata:
        folder_name = os.path.join(image['instrument'], image['visit'],
                                   image['filter'])
        utils.copy_if_not_exists(image['filename'], folder_name)

    visit_list = metadata.group_by(['instrument', 'visit', 'filter'])

    return visit_list.groups.keys
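# Usage sketch (hypothetical raw frames; exposures whose EXPSTART values are
# separated by more than min_visit_separation days start a new visit):
import glob
visit_keys = sort_raw_data(sorted(glob.glob('raw/*_flt.fits')))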
Example No. 20
def join_csv(filenames: list, output: str):
    n = len(filenames)
    tables = []
    for file in filenames:
        if file.endswith('.csv'):
            tables.append(table.Table.read(file, format='ascii.csv'))
        elif file.endswith('.fits'):
            tables.append(table.Table(fits.open(file)[1].data))
        else:
            raise ValueError('File format not recognised.')

    output_tbl = table.hstack(tables)
    for col in output_tbl.colnames:
        if col[-2:] == '_1':
            col_new = col[:-2]
            column = output_tbl[col]
            output_tbl.remove_column(col)
            output_tbl[col_new] = column
            for i in range(2, n + 1):
                output_tbl.remove_column(col_new + '_' + str(i))

    print('Writing to', output)
    output_tbl.write(output, format='ascii.csv', overwrite=True)
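# Usage sketch (hypothetical catalogs; columns duplicated by hstack, such as
# 'id_1' ... 'id_n', are collapsed back to a single 'id' column):
join_csv(['photometry.csv', 'astrometry.fits'], 'combined.csv')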
Example No. 21
def Wbinning_bserr(x, val, err=0, w=None, minr=0, maxr=1e10, nbinr=10, Nbs=100):
    """Log-spaced binned weighted average with bootstrap errors."""
    import astropy.table as t
    bins = np.logspace(np.log10(minr), np.log10(maxr), nbinr + 1)
    bin_min, bin_max, bincenter = logBinsHarmonic(minr, maxr, nbinr)
    valbin, binedge = np.histogram(x, weights=np.nan_to_num(val * w), bins=bins)
    val2bin, _ = np.histogram(x, weights=np.nan_to_num((val * w)**2), bins=bins)
    wbin, _ = np.histogram(x, weights=w, bins=bins)
    w2bin, _ = np.histogram(x, weights=w**2, bins=bins)
    binvalw = valbin / wbin
    binerr2 = np.sqrt(val2bin) / wbin  # regular error
    binvalw_bs = np.ones([Nbs, nbinr])
    for i in range(Nbs):
        bs = np.random.choice(t.Table([x, val, w], names=('x', 'val', 'w')),
                              size=len(x), replace=True)
        xbs, valbs, wbs = bs['x'], bs['val'], bs['w']
        valbin_i, _ = np.histogram(xbs, weights=np.nan_to_num(valbs * wbs), bins=bins)
        wbin_i, _ = np.histogram(xbs, weights=wbs, bins=bins)
        binvalw_bs[i] = valbin_i / wbin_i
    binerr = np.std(binvalw_bs, axis=0)  # bootstrap errors
    # Nwbin = wbin**2 / w2bin
    Nbin, _ = np.histogram(x, bins=bins)
    # STD = np.sqrt(val2bin / wbin - binvalw**2)
    return binvalw, binerr, binerr2, bincenter, bin_min, bin_max, Nbin, wbin, w2bin
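# Usage sketch (toy data; `logBinsHarmonic` must be defined elsewhere in the
# original module):
x = np.random.uniform(0.1, 10.0, 1000)
val = np.random.normal(1.0, 0.1, 1000)
w = np.ones_like(x)
out = Wbinning_bserr(x, val, w=w, minr=0.1, maxr=10.0, nbinr=5, Nbs=50)
binvalw, binerr = out[0], out[1]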
Example No. 22
def get_light_curve_array(objid, ptrobs_min, ptrobs_max):
    """ Get lightcurve from fits file as an array - avoid some Pandas overhead

    Parameters
    ----------
    objid : str
        The object ID. E.g. objid='DDF_04_NONIa-0004_87287'
    ptrobs_min : int
        Min index of object in _PHOT.FITS.
    ptrobs_max : int
        Max index of object in _PHOT.FITS.

    Returns
    -------
    phot_data : astropy.table.Table
        A Table containing the MJD, FLT, FLUXCAL, FLUXCALERR, and ZEROPT
        columns for the requested object.
    """
    field, model, base, snid = objid.split('_')
    if field == 'IDEAL':
        filename = "{0}_MODEL{1}/{0}_{2}_PHOT.FITS".format(field, model, base)
    else:
        filename = "LSST_{0}_MODEL{1}/LSST_{0}_{2}_PHOT.FITS".format(
            field, model, base)
    phot_file = os.path.join(DATA_DIR, data_release, filename)
    if not os.path.exists(phot_file):
        phot_file = phot_file + '.gz'

    try:
        phot_HDU = afits.open(phot_file, memmap=True)
    except Exception as e:
        message = f'Could not open photometry file {phot_file}'
        raise RuntimeError(message) from e

    phot_data = phot_HDU[1].data[ptrobs_min - 1:ptrobs_max]
    phot_data = at.Table(phot_data)
    return phot_data
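# Usage sketch (objid format follows the docstring example; DATA_DIR and
# data_release are module-level globals not shown in this snippet, and the
# pointer values are illustrative):
lc = get_light_curve_array('DDF_04_NONIa-0004_87287',
                           ptrobs_min=1534, ptrobs_max=1586)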
Example No. 23
def getFRelWeights(config):
    """Returns a dictionary of frequency weights used in relativistic corrections for each tile. This is 
    cached in the selFn/ dir after the first time this routine is called.
    
    """

    if 'photFilter' not in config.parDict.keys() or config.parDict['photFilter'] is None:
        return {}

    fRelWeightsFileName = config.selFnDir + os.path.sep + "fRelWeights.fits"
    if not os.path.exists(fRelWeightsFileName):
        fRelTab = atpy.Table()
        fRelTab.add_column(atpy.Column(config.allTileNames, 'tileName'))
        for tileCount in range(len(config.allTileNames)):
            tileName = config.allTileNames[tileCount]
            filterFileName = config.diagnosticsDir + os.path.sep + tileName + os.path.sep + "filter_%s#%s.fits" % (
                config.parDict['photFilter'], tileName)
            with pyfits.open(filterFileName) as img:
                for i in range(1, 10):
                    if 'RW%d_GHZ' % (i) in img[0].header.keys():
                        freqGHz = str(img[0].header['RW%d_GHZ' % (i)])
                        if freqGHz == '':
                            freqGHz = '148.0'
                            print(
                                ">>> WARNING: setting freqGHz = '%s' in getFRelWeights - this is okay if you're running on a TILe-C y-map"
                                % (freqGHz))
                        if freqGHz not in fRelTab.keys():
                            fRelTab.add_column(
                                atpy.Column(np.zeros(len(config.allTileNames)),
                                            freqGHz))
                        fRelTab[freqGHz][tileCount] = img[0].header['RW%d' % (i)]
        fRelTab.meta['NEMOVER'] = nemo.__version__
        fRelTab.write(fRelWeightsFileName, overwrite=True)

    return loadFRelWeights(fRelWeightsFileName)
Example No. 24
def read_tpf(fname,index,return_hdr=True):
    '''
    fname: str, filename
    index: int, hdulist index
            [0,1,2] = primary, target table, aperture mask
            [4,...] = photometry using specified aperture
    return_hdr: bool
    '''
    hdulist = fits.open(fname)
    if index in (0, 1, 2):  # primary, target table, or aperture mask
        data = hdulist[index].data
        hdr = hdulist[index].header
        if return_hdr:
            return data, hdr
        else:
            return data
    else:  # photometry using the specified aperture
        df = table.Table(hdulist[index].data).to_pandas()
        hdr = hdulist[index].header
        if return_hdr:
            return df, hdr
        else:
            return df
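# Usage sketch (hypothetical target pixel file; per the docstring, index >= 4
# returns the photometry table as a pandas DataFrame):
phot_df, hdr = read_tpf('tpf_photometry.fits', index=4)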
Example No. 25
def test_simple_subclass():
    t = MyTable([[1, 2], [3, 4]])
    row = t[0]
    assert isinstance(row, MyRow)
    assert isinstance(t['col0'], MyColumn)
    assert isinstance(t.columns, MyTableColumns)
    assert isinstance(t.formatter, MyTableFormatter)

    t2 = MyTable(t)
    row = t2[0]
    assert isinstance(row, MyRow)
    assert str(row) == '(1, 3)'

    t3 = table.Table(t)
    row = t3[0]
    assert not isinstance(row, MyRow)
    assert str(row) != '(1, 3)'

    t = MyTable([[1, 2], [3, 4]], masked=True)
    row = t[0]
    assert isinstance(row, MyRow)
    assert str(row) == '(1, 3)'
    assert isinstance(t['col0'], MyMaskedColumn)
    assert isinstance(t.formatter, MyTableFormatter)
Example No. 26
    def interpolate_grid(self, nz=50, nq=50):

        self.nz = nz
        self.nq = nq
        zarr = np.linspace(np.min(self.grid0['LOGZ']), np.max(self.grid0['LOGZ']), self.nz)
        qarr = np.linspace(np.min(self.grid0['LOGQ']), np.max(self.grid0['LOGQ']), self.nq)

        nlines0 = len(self.grid0['ID'][0])
        fluxarr = np.zeros((self.nz, self.nq, nlines0))
        grid_x, grid_y = np.meshgrid(zarr, qarr)

        # define the new interpolated grid as a table
        intergrid = table.Table()
        intergrid['LOGQ'] = grid_y.flatten()
        intergrid['LOGZ'] = grid_x.flatten()
        intergrid['LOGOHSUN'] = [self.grid0['LOGOHSUN'][0]] * self.nq * self.nz
        intergrid['ID'] = [self.grid0['ID'][0]] * self.nq * self.nz

        flux = np.array(self.grid0['FLUX'])
        # qauxarr = np.unique(self.grid0['LOGQ'])
        # zauxarr = np.unique(self.grid0['LOGZ'])
        logzin = np.array(self.grid0['LOGZ'])
        logqin = np.array(self.grid0['LOGQ'])

        for i in range(nlines0):
            fluxarr[:, :, i] = interpolate.griddata((logzin, logqin),
                                                    flux[:, i], (grid_x, grid_y),
                                                    method='cubic')
        # ALTERNATIVE INTERPOLATION SCHEME:
        # f = interpolate.interp2d(zauxarr, qauxarr, fflux[:, :, i], kind='cubic')
        # fluxarr2[:, :, i] = f(zarr, qarr)

        # going from a 2D grid to a 1D grid
        intergrid['FLUX'] = self.make_grid_1d(intergrid, grid_x, grid_y, fluxarr)

        self.intergrid = intergrid
        return self
Example No. 27
    def make_psf_tab_bands(self, mode='moffat', overwrite=False):
        """
		make measurements on the psf sizes of stamp-*.fits of all bands and write to file 'psf.csv'. 
		"""
        fn = self.get_fp_psf_tab(msrsuffix='_bands')

        if not os.path.isfile(fn) or overwrite:
            print("[plaindecomposer] making psf.csv")
            tab = at.Table([[mode]], names=['mode'])

            for band in self.bands:
                psf_fwhm_arcs = self._get_psf_fwhm_arcs(imgtag=band, mode=mode)
                tab['psf_fwhm_arcs_{}'.format(band)] = psf_fwhm_arcs

            for band in self.bands:
                psf_fwhm_pix = self._get_psf_fwhm_pix(imgtag=band, mode=mode)
                tab['psf_fwhm_pix_{}'.format(band)] = psf_fwhm_pix

            tab.write(fn, format='ascii.csv', overwrite=overwrite)
        else:
            print("[plaindecomposer] skip making psf.csv as file exists")

        status = os.path.isfile(fn)
        return status
Example No. 28
    def _run_fsps_newparams(self, tage, d):
        for k in d:
            self.sp.params[k] = d[k]

        lam, spec = self.sp.get_spectrum(tage=tage, peraa=True)

        # calculate spectral indices using velocity dispersion of zero
        self.sp.params['sigma_smooth'] = 0.
        lam, spec_zeroveldisp = self.sp.get_spectrum(tage=tage, peraa=True)

        sis = indices.data['ixname']
        sis_tab = t.Table(data=[
            t.Column([indices.StellarIndex(si)(lam, spec_zeroveldisp, axis=0)],
                     name=si) for si in sis
        ])

        self.sp.params['dust1'] = 0.
        self.sp.params['dust2'] = 0.
        lam, spec_zeroatten = self.sp.get_spectrum(tage=tage, peraa=True)
        H_ion_ph_rate = spec_to_photon_rate(x=lam,
                                            xunit=u.AA,
                                            spec=spec_zeroatten,
                                            specunit=u.Lsun / u.AA,
                                            ph_e_thresh=(c.h * c.c * c.Ryd),
                                            out_unit=u.ph / u.s)

        uv_slope = calc_uv_slope(x=lam,
                                 xunit=u.AA,
                                 spec=spec_zeroatten,
                                 specunit=u.Lsun / u.AA,
                                 ratio_unit=u.Lsun / u.AA,
                                 xrg=[505., 912.])

        self.sfh_changeflag = False

        return spec, sis_tab, H_ion_ph_rate, uv_slope
Example No. 29
def plate_plans_db(inputs,
                   plate_mode=False,
                   verbose=False,
                   overwrite=False,
                   log=None,
                   load_holes=True,
                   load_addenda=True):
    """Loads plateruns or plates from platePlans into the DB.

    Parameters:
        inputs (list, tuple):
            A list of plateruns or plates to be ingested into the DB.
        plate_mode (bool):
            If ``True``, treats ``inputs`` as a list of plates.
            Otherwise assumes they are plateruns.
        verbose (bool):
            If ``True`` outputs more information in the shell log.
        overwrite (bool):
            If ``True``, values in the DB will be overwritten if needed.
        log (``platedesign.core.logger.Logger`` object):
            A ``Logger`` object to use. Otherwise it will create a new log.
        load_holes (bool):
            If ``True``, loads the plateHoles file along with the plate.
        load_addenda (bool):
            If ``True``, loads the plateDefinitionAddenda file along with the design.

    """

    log = log or pd_log

    log.info('running plate_plans_db in mode={0!r}.'.format(
        'platerun' if not plate_mode else 'plate'))

    # Checks the connection
    conn_status = platedb.database.connected
    if conn_status:
        log.info('database connection is open.')
    else:
        raise RuntimeError(
            'cannot connect to the database. Review your connection settings.')

    # Creates a list of platePlans lines for each platerun and plate.
    # Converts the lines to an astropy table for easier handling.
    lines_dict = {}
    if not plate_mode:
        for platerun in inputs:
            lines = utils.get_lines_for_platerun(platerun)
            if len(lines) == 0:
                raise ValueError(
                    'no platePlans lines found for platerun {0!r}'.format(
                        platerun))
            lines_dict[platerun] = table.Table(lines)
    else:
        for plate in inputs:
            line = utils.get_lines_for_plate(plate)
            if len(line) == 0:
                raise ValueError(
                    'cannot find platePlans line for plate {0!r}'.format(
                        plate))
            platerun = line[0]['platerun']
            if platerun not in lines_dict:
                lines_dict[platerun] = table.Table(line)
            else:
                lines_dict[platerun].add_row(line[0])

    if len(lines_dict) == 0:
        raise ValueError('no plateruns found. Your input parameters '
                         'do not seem to match any plate.')

    # Check for valid "survey" values.
    # Joint surveys are separated by a hyphen, e.g. "apogee-marvels".
    # These should be split so that the literal string is not added as a new survey.
    unique_surveys = set()
    for platerun in lines_dict:
        for survey in lines_dict[platerun]['survey'].astype('U'):
            for xx in survey.split('-'):
                unique_surveys.add(xx)

    # Checks surveys
    for survey in unique_surveys:
        try:
            platedb.Survey.get(platedb.Survey.plateplan_name == survey)
            log.debug('survey {0!r} is in the DB'.format(survey))
        except peewee.DoesNotExist:
            raise ValueError(
                'A survey name was found that does not appear in the database: {0!r}. '
                'Please correct the platePlans.par entry or else add the new survey '
                'to the survey table in the plate database.'.format(survey))

    for platerun in lines_dict:

        run_lines = lines_dict[platerun]

        log.important('now doing platerun {0!r} ...'.format(platerun))

        # Populate plate_run table if a new value is found.
        try:
            year = int(platerun[0:4])
        except ValueError:
            year = None  # avoid an unbound name below; fix by hand in the DB
            warnings.warn(
                'Could not determine the year for platerun {0!r}; '
                'please update this value in the plate_run table by hand.'.
                format(platerun), UserWarning)

        pr, created = platedb.PlateRun.get_or_create(label=platerun, year=year)

        if not created:
            log.debug(
                'platerun {0} is already in the DB.'.format(platerun))
        else:
            log.debug(
                'added platerun {0} to the plate_run table.'.format(platerun))

        design_ids = np.unique(run_lines['designid'])

        for design_id in design_ids:

            log.important('loading design_id={0}'.format(design_id))
            _load_design(design_id, log, overwrite=overwrite)

        if load_addenda:
            log.important('loading plateDefinitionAddendas ...')
            plate_addenda_db(design_ids, design_mode=True, log=log)

        plate_ids = np.unique(run_lines['plateid'])

        for plate_id in plate_ids:

            plate_line = run_lines[run_lines['plateid'] == plate_id][0]

            log.important('loading plate_id={0}'.format(plate_id))
            _load_plate(plate_id, plate_line, log, overwrite=overwrite)

        log.important(
            'populating observing ranges for {0} ... '.format(platerun))
        populate_obs_range(plate_ids, log=log)

        if load_holes:
            log.important('loading plate holes for {0} ...'.format(platerun))
            plate_holes_db(plate_ids,
                           plate_mode=True,
                           log=log,
                           overwrite=overwrite)

    log.important('success! All designs and plates have been loaded.')
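# Usage sketch (hypothetical platerun label; requires a live platedb
# connection and matching platePlans.par entries):
plate_plans_db(['2017.01.a.apogee'], verbose=True)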
Example No. 30
def fortney_grid(args, write_plot=False, write_table=False):
    """
    Function to grab a Fortney Grid model, plot it, and make a table.

    Parameters
    ----------
    args : dict
        Dictionary of arguments for the Fortney Grid. Must include:
        'temp', 'chem', 'cloud', 'pmass', 'm_unit', 'reference_radius',
        'r_unit', 'rstar', and 'rstar_unit'.
    write_plot : bool, optional
        Whether or not to save the bokeh plot, defaults to False.
    write_table : bool, optional
        Whether or not to save the ascii table, defaults to False.

    Returns
    -------
    fig : bokeh object
        The unsaved bokeh plot.
    fh : ascii table object
        The unsaved ascii table.
    temp_out : list of str of int
        The list of temperatures in the model grid.
    """
    utils.check_for_data('fortney')

    # Check for Fortney Grid database
    print(
        os.path.join(utils.get_env_variables()['exoctk_data'],
                     'fortney/fortney_models.db'))
    try:
        db = create_engine(
            'sqlite:///' +
            os.path.join(utils.get_env_variables()['exoctk_data'],
                         'fortney/fortney_models.db'))
        header = pd.read_sql_table('header', db)
    except Exception:
        raise Exception(
            'Fortney Grid File Path is incorrect, or not initialized')

    if args:
        rstar = float(args['rstar'])
        rstar = (rstar * u.Unit(args['rstar_unit'])).to(u.km)
        reference_radius = float(args['reference_radius'])
        rplan = (reference_radius * u.Unit(args['r_unit'])).to(u.km)
        temp = float(args['temp'])
        # clouds
        cloud = args['cloud']
        if cloud.find('flat') != -1:
            flat = int(cloud[4:])
            ray = 0
        elif cloud.find('ray') != -1:
            ray = int(cloud[3:])
            flat = 0
        elif int(cloud) == 0:
            flat = 0
            ray = 0
        else:
            flat = 0
            ray = 0
            print('Cloud parameter not specified; defaulting to no clouds')

        # chemistry
        chem = args['chem']
        if chem == 'noTiO':
            noTiO = True
        elif chem == 'eqchem':
            noTiO = False
            # grid does not allow clouds for cases with TiO
            flat = 0
            ray = 0
        else:
            # noTiO would otherwise be unbound in the query below
            raise ValueError("chem must be 'noTiO' or 'eqchem'")

        fort_grav = 25.0 * u.m / u.s**2

        df = header.loc[(header.gravity == fort_grav) & (header.temp == temp) &
                        (header.noTiO == noTiO) & (header.ray == ray) &
                        (header.flat == flat)]

        wave_planet = np.array(
            pd.read_sql_table(df['name'].values[0], db)['wavelength'])[::-1]
        r_lambda = np.array(
            pd.read_sql_table(df['name'].values[0], db)['radius']) * u.km

        # All Fortney models have a fixed 1.25 R_jup radius
        z_lambda = r_lambda - (1.25 * u.R_jup).to(u.km)

        # Scale with planetary mass
        pmass = float(args['pmass'])
        mass = (pmass * u.Unit(args['m_unit'])).to(u.kg)

        # Convert radius to m for gravity units
        gravity = constants.G * (mass) / (rplan.to(u.m))**2.0

        # Scale lambda (this technically ignores the fact that scale height
        # is altitude dependent), therefore it will not be valid for very,
        # very low gravities
        z_lambda = z_lambda * fort_grav / gravity

        # Create new wavelength-dependent R based on scaled gravity
        r_lambda = z_lambda + rplan
        r_lambda = z_lambda + rplan

        # Finally compute (rp/r*)^2
        flux_planet = np.array(r_lambda**2 / rstar**2)

        x = wave_planet
        y = flux_planet[::-1]

    else:
        df = pd.read_sql_table('t1000g25_noTiO', db)
        x, y = df['wavelength'], df['radius']**2.0 / 7e5**2.0

    tab = at.Table(data=[x, y])
    fh = io.StringIO()
    tab.write(fh, format='ascii.no_header')

    if write_table:
        tab.write('fortney.dat', format='ascii.no_header')

    fig = figure(plot_width=1100, plot_height=400)
    fig.line(x, 1e6 * (y - np.mean(y)), color='Black', line_width=0.5)
    fig.xaxis.axis_label = 'Wavelength (um)'
    fig.yaxis.axis_label = 'Rel. Transit Depth (ppm)'

    if write_plot:
        output_file('fortney.html')
        save(fig)

    # Return temperature list for the fortney grid page
    temp_out = list(map(str, header.temp.unique()))

    return fig, fh, temp_out
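# Usage sketch (illustrative argument dict; requires the exoctk 'fortney'
# data package to be downloaded and the exoctk_data path to be configured):
args = {'temp': 1000, 'chem': 'noTiO', 'cloud': '0', 'pmass': 1.5,
        'm_unit': 'Mjup', 'reference_radius': 1.25, 'r_unit': 'Rjup',
        'rstar': 1.0, 'rstar_unit': 'Rsun'}
fig, fh, temp_out = fortney_grid(args)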