Example #1

# Imports assumed by this snippet; GAUSS_3_7x7 is a 7x7 Gaussian
# convolution kernel defined elsewhere in the source module.
import boto3
import numpy as np
import sep
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy import wcs
from astropy import units as u
from astropy.table import Table

def detect_with_sep(event,
                    detect_thresh=2.,
                    npixels=8,
                    grow_seg=5,
                    gauss_fwhm=2.,
                    gsize=3,
                    im_wcs=None,
                    root='mycat'):

    drz_file = event['fits_s3_key']
    drz_file_bucket = event['fits_s3_bucket']
    root = drz_file.split('/')[-1].split('_')[0]

    s3 = boto3.resource('s3')
    s3_client = boto3.client('s3')
    bkt = s3.Bucket(drz_file_bucket)
    bkt.download_file(drz_file,
                      '/tmp/{0}'.format(root),
                      ExtraArgs={"RequestPayer": "requester"})

    im = fits.open('/tmp/{0}'.format(root))
    im_wcs = wcs.WCS(im[1].header, relax=True)

    data = im[1].data.byteswap().newbyteorder()
    wht_data = im[2].data.byteswap().newbyteorder()
    data_mask = (data == 0).astype(data.dtype)

    ## Get AB zeropoint
    if 'PHOTFNU' in im[0].header:
        ZP = -2.5 * np.log10(im[0].header['PHOTFNU']) + 8.90
    elif 'PHOTFLAM' in im[0].header:
        ZP = (-2.5 * np.log10(im[0].header['PHOTFLAM']) - 21.10 -
              5 * np.log10(im[0].header['PHOTPLAM']) + 18.6921)
    else:
        print("Couldn't find PHOTFNU or PHOTPLAM/PHOTFLAM keywords; aborting")
        return None

    # Conversion factor between micro-Jy and detector units (DN)
    uJy_to_dn = 1 / (3631 * 1e6 * 10**(-0.4 * ZP))

    # Set up the error array from the inverse-variance weight map
    err = 1 / np.sqrt(wht_data)
    err[~np.isfinite(err)] = 0
    mask = (err == 0)

    # get the background
    bkg = sep.Background(data, mask=mask, bw=32, bh=32, fw=3, fh=3)
    bkg_data = bkg.back()

    ratio = bkg.rms() / err
    err_scale = np.median(ratio[(~mask) & np.isfinite(ratio)])

    err *= err_scale

    objects = sep.extract(data - bkg_data,
                          detect_thresh,
                          err=err,
                          mask=mask,
                          minarea=14,
                          filter_kernel=GAUSS_3_7x7,
                          filter_type='conv',
                          deblend_nthresh=32,
                          deblend_cont=0.005,
                          clean=True,
                          clean_param=1.,
                          segmentation_map=False)

    catalog = Table(objects)

    # Add derived quantities to the catalog

    autoparams = [2.5, 3.5]

    catalog['number'] = np.arange(len(catalog), dtype=np.int32) + 1
    catalog['theta'] = np.clip(catalog['theta'], -np.pi / 2, np.pi / 2)
    for c in ['a', 'b', 'x', 'y', 'theta']:
        catalog = catalog[np.isfinite(catalog[c])]

    catalog['ra'], catalog['dec'] = im_wcs.all_pix2world(
        catalog['x'], catalog['y'], 1)
    catalog['ra'].unit = u.deg
    catalog['dec'].unit = u.deg
    catalog['x_world'], catalog['y_world'] = catalog['ra'], catalog['dec']

    kronrad, krflag = sep.kron_radius(data - bkg_data, catalog['x'],
                                      catalog['y'], catalog['a'], catalog['b'],
                                      catalog['theta'], 6.0)

    kronrad *= autoparams[0]
    kronrad[~np.isfinite(kronrad)] = autoparams[1]
    kronrad = np.maximum(kronrad, autoparams[1])

    kron_out = sep.sum_ellipse(data - bkg_data,
                               catalog['x'],
                               catalog['y'],
                               catalog['a'],
                               catalog['b'],
                               catalog['theta'],
                               kronrad,
                               subpix=5,
                               err=err)

    kron_flux, kron_fluxerr, kron_flag = kron_out
    kron_flux_flag = kron_flag

    catalog['mag_auto_raw'] = ZP - 2.5 * np.log10(kron_flux)
    catalog['magerr_auto_raw'] = 2.5 / np.log(10) * kron_fluxerr / kron_flux

    catalog['mag_auto'] = catalog['mag_auto_raw'] * 1.
    catalog['magerr_auto'] = catalog['magerr_auto_raw'] * 1.

    catalog['kron_radius'] = kronrad * u.pixel
    catalog['kron_flag'] = krflag
    catalog['kron_flux_flag'] = kron_flux_flag

    # Make a plot
    im_data = im[1].data
    im_shape = im_data.shape
    im_data[np.isnan(im_data)] = 0.0

    # Trim the top and bottom 1 percent of pixel values
    top = np.percentile(im_data, 99)
    im_data[im_data > top] = top
    bottom = np.percentile(im_data, 1)
    im_data[im_data < bottom] = bottom

    # Scale the data.
    im_data = im_data - im_data.min()
    im_data = (im_data / im_data.max()) * 255.
    im_data = np.uint8(im_data)

    f, (ax) = plt.subplots(1, 1, sharex=True)
    f.set_figheight(12)
    f.set_figwidth(12)
    ax.imshow(im_data, cmap="Greys", clim=(0, 255), origin='lower')
    ax.plot(catalog['x'],
            catalog['y'],
            'o',
            markeredgewidth=1,
            markeredgecolor='red',
            markerfacecolor='None')
    ax.set_xlim([-0.05 * im_shape[1], 1.05 * im_shape[1]])
    ax.set_ylim([-0.05 * im_shape[0], 1.05 * im_shape[0]])
    f.savefig('/tmp/{0}.png'.format(root))

    # Write the catalog to local disk
    catalog.write('/tmp/{0}.catalog.fits'.format(root), format='fits')

    # Write out to S3
    s3 = boto3.resource('s3')
    s3.meta.client.upload_file('/tmp/{0}.catalog.fits'.format(root),
                               event['s3_output_bucket'],
                               '{0}/{1}.catalog.fits'.format(root, root))
    s3.meta.client.upload_file('/tmp/{0}.png'.format(root),
                               event['s3_output_bucket'],
                               'PNG/{0}.png'.format(root))
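
For reference, a minimal sketch of the event payload this handler consumes; the bucket and key names below are placeholders, not values from the original.

event = {
    'fits_s3_key': 'mission/field/field-f160w_drz_sci.fits',  # placeholder
    'fits_s3_bucket': 'some-input-bucket',                    # placeholder
    's3_output_bucket': 'some-output-bucket',                 # placeholder
}
detect_with_sep(event)
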
Example #2

# Imports assumed by this snippet; the enclosing function definition for
# the return statement below is not shown.
import sys
import numpy as np
from astropy.table import Table

    ##
    ##  Note:  a fixed center for moonsep and the power-law index were
    ##         chosen by trial and error with these parameters.
    ##
    ##         params[0] ->  dependence on moon alt. -> Z0.
    ##         params[1] ->  scaling of the moon separation term -> Z1.

    return  params[0] + params[1] * np.abs(moonsep - 96.) ** 1.863

##  Airmass to run is provided on the command line.
all_airmass = [1.0, 1.2, 1.6, 2.0, 2.4, 2.8, 3.0]
airmass     = all_airmass[int(sys.argv[1])]

##  CHH Moon model called for a grid of conditions. 
dat         = np.loadtxt('../../dat/moons_{:.1f}.txt'.format(airmass))
dat         = Table(dat, names=['AIRMASS', 'MOONFRAC', 'MOONALT', 'MOONSEP', '4000', '6700', '7160', '8070', '9060'])

##  Get normalisation.
norm        = np.loadtxt('../../dat/moon_norm.txt')
norm        = Table(norm, names=['AIRMASS', 'MOONFRAC', 'MOONALT', 'MOONSEP', '4000', '6700', '7160', '8070', '9060'])

##  Normalise dat.
for band in ['4000', '6700', '7160', '8070', '9060']:
    dat[band] /= norm[band]

    dat[band]  = np.clip(dat[band], 1.0, None)

print(dat)

##  Choose a band to define the expfac. 
dat['EXPFAC'] = dat['4000']
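
A short usage sketch: pick the grid row nearest to a chosen set of moon conditions and read off its exposure factor. The column names come from the table above; the condition values are arbitrary.

cond  = {'MOONFRAC': 0.6, 'MOONALT': 40.0, 'MOONSEP': 90.0}
dist2 = np.zeros(len(dat))
for key, val in cond.items():
    dist2 += (dat[key] - val) ** 2
print('EXPFAC:', dat['EXPFAC'][np.argmin(dist2)])
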
Example #3

# Imports assumed by this snippet; ``utils`` is the package's own helper
# module providing split_bands().
import numpy as np
from astropy.table import Table

def classify_targets(fits_table,
                     band_names=None,
                     lambda_eff=None,
                     out_path=None):
    """Tabulate fitting coordinates for SNe based on their fit results

    See the ``create_empty_table`` function for the assumed input table format.
    If ``band_names`` or ``lambda_eff`` are not given, they are taken from
    ``fits_table.meta``. Any targets having one or more fits with the string
    'failed' in the message are skipped (case insensitive).

    Args:
        fits_table (Table): A table of fit results
        band_names  (list): List of band names used when fitting
        lambda_eff  (list): The effective wavelength of each band in angstroms
        out_path     (str): Optionally write results to file

    Returns:
        An astropy table of fitting coordinates
    """

    if band_names is None or lambda_eff is None:
        band_names = fits_table.meta['band_names']
        lambda_eff = fits_table.meta['lambda_eff']

    # Keep only objects that don't have failed fits in any band
    fits_df = fits_table.to_pandas()
    failed_fits = fits_df['message'].str.lower().str.contains('failed')
    failed_ids = fits_df[failed_fits].obj_id.unique()
    good_fits = fits_df[~fits_df.obj_id.isin(failed_ids)]
    good_fits.set_index(['obj_id', 'source'], inplace=True)

    out_table = Table(names=['obj_id', 'x', 'y'], dtype=['U100', float, float])
    for obj_id in good_fits.index.unique(level='obj_id'):

        try:
            hsiao_data = good_fits.loc[obj_id, 'hsiao_x1']
            redshift = hsiao_data[hsiao_data['band'] == 'all']['z'][0]
            blue_bands, red_bands = utils.split_bands(band_names, lambda_eff,
                                                      redshift)

            blue_bands = np.concatenate([blue_bands, ['blue']])
            red_bands = np.concatenate([red_bands, ['red']])

            hsiao_blue = hsiao_data[hsiao_data['band'].isin(blue_bands)]
            hsiao_red = hsiao_data[hsiao_data['band'].isin(red_bands)]
            hsiao_blue_chisq = hsiao_blue['chisq'].sum() / hsiao_blue['ndof'].sum()
            hsiao_red_chisq = hsiao_red['chisq'].sum() / hsiao_red['ndof'].sum()

            sn91bg_data = good_fits.loc[obj_id, 'sn91bg']
            sn91bg_blue = sn91bg_data[sn91bg_data['band'].isin(blue_bands)]
            sn91bg_red = sn91bg_data[sn91bg_data['band'].isin(red_bands)]
            sn91bg_blue_chisq = sn91bg_blue['chisq'].sum() / sn91bg_blue['ndof'].sum()
            sn91bg_red_chisq = sn91bg_red['chisq'].sum() / sn91bg_red['ndof'].sum()

        except KeyError:
            continue

        else:
            x = hsiao_blue_chisq - sn91bg_blue_chisq
            y = hsiao_red_chisq - sn91bg_red_chisq
            out_table.add_row([obj_id, x, y])

    if out_path:
        out_table.write(out_path, overwrite=True)

    return out_table
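
A minimal call sketch, assuming ``fit_results`` is a table in the input format described in the docstring, with ``band_names`` and ``lambda_eff`` stored in its ``.meta``:

coords = classify_targets(fit_results, out_path='classification_coords.fits')
print(coords)  # columns: obj_id, x, y
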
Example #4

# (fragment: the per-image loop that opens this snippet, and the code
# defining box/ibox and to_ds9_box, are not shown)
    tbl_out.append((
        os.path.basename(image),
        hdulist[0].header['OBJECT'],
        icra, icdec, iwidth, iheight,
        ibox[0], ibox[1], ibox[2], ibox[3],
        ))
    hdulist.close()
cra, cdec, width, height = to_ds9_box(box)
tbl_out.append((
    'all', 'all',
    cra, cdec, width, height,
    box[0], box[1], box[2], box[3]
    ))
tbl_out = Table(np.array(tbl_out), names=[
    'filename', 'objname',
    'ra', 'dec', 'width', 'height',
    'ramin', 'ramax', 'decmin', 'decmax',
    ])
tbl_out.write(out_file, format='ascii.commented_header')
with open(out_reg, 'w') as fo:
    fo.write('global color=red\n')
    for entry in tbl_out[:-1]:
        filename, objname, icra, icdec, iwidth, iheight = [
            entry[i] for i in range(6)]
        fo.write('fk5; box({0},{1},{2},{3}, 0) # text={{{4}}}\n'.format(
            icra, icdec, iwidth, iheight, "{0} {1}".format(
                os.path.splitext(filename)[0], objname)))
    fo.write('fk5; box({0},{1},{2},{3}, 0) # color=yellow\n'.format(
        cra, cdec, width, height))
# create wcs file
minra, maxra, mindec, maxdec = box
Example #5

# Imports assumed by this snippet; ``_is_galactic`` is a helper defined
# elsewhere in the same module.
from astropy.io import fits
from astropy.table import Table
from astropy.utils.data import download_file

def fetch_fermi_catalog(catalog, extension=None):
    """Fetch Fermi catalog data.

    Reference: http://fermi.gsfc.nasa.gov/ssc/data/access/lat/.

    The Fermi catalogs contain the following relevant catalog HDUs:

    * 3FGL Catalog : LAT 4-year Point Source Catalog
        * ``LAT_Point_Source_Catalog`` Point Source Catalog Table.
        * ``ExtendedSources`` Extended Source Catalog Table.
    * 2FGL Catalog : LAT 2-year Point Source Catalog
        * ``LAT_Point_Source_Catalog`` Point Source Catalog Table.
        * ``ExtendedSources`` Extended Source Catalog Table.
    * 1FGL Catalog : LAT 1-year Point Source Catalog
        * ``LAT_Point_Source_Catalog`` Point Source Catalog Table.
    * 2FHL Catalog : Second Fermi-LAT Catalog of High-Energy Sources
        * ``Count Map`` AIT projection 2D count image
        * ``2FHL Source Catalog`` Main catalog
        * ``Extended Sources`` Extended Source Catalog Table
        * ``ROIs`` Regions of interest
    * 1FHL Catalog : First Fermi-LAT Catalog of Sources above 10 GeV
        * ``LAT_Point_Source_Catalog`` Point Source Catalog Table.
        * ``ExtendedSources`` Extended Source Catalog Table.
    * 2PC Catalog : LAT Second Catalog of Gamma-ray Pulsars
        * ``PULSAR_CATALOG`` Pulsar Catalog Table.
        * ``SPECTRAL`` Table of Pulsar Spectra Parameters.
        * ``OFF_PEAK`` Table for further Spectral and Flux data for the Catalog.

    Parameters
    ----------
    catalog : {'3FGL', '2FGL', '1FGL', '1FHL', '2FHL', '2PC'}
       Specifies which catalog to display.
    extension : str
        Specifies which catalog HDU to provide as a table (optional).
        See list of catalog HDUs above.

    Returns
    -------
    hdu_list (Default) : `~astropy.io.fits.HDUList`
        Catalog FITS HDU list (for access to full catalog dataset).
    catalog_table : `~astropy.table.Table`
        Catalog table for a selected hdu extension.

    Examples
    --------
    >>> from gammapy.catalog import fetch_fermi_catalog
    >>> fetch_fermi_catalog('2FGL')
        [<astropy.io.fits.hdu.image.PrimaryHDU at 0x3330790>,
         <astropy.io.fits.hdu.table.BinTableHDU at 0x338b990>,
         <astropy.io.fits.hdu.table.BinTableHDU at 0x3396450>,
         <astropy.io.fits.hdu.table.BinTableHDU at 0x339af10>,
         <astropy.io.fits.hdu.table.BinTableHDU at 0x339ff10>]

    >>> from gammapy.catalog import fetch_fermi_catalog
    >>> fetch_fermi_catalog('2FGL', 'LAT_Point_Source_Catalog')
        <Table rows=1873 names= ... >
    """
    BASE_URL = 'http://fermi.gsfc.nasa.gov/ssc/data/access/lat/'

    if catalog == '3FGL':
        url = BASE_URL + '4yr_catalog/gll_psc_v16.fit'
    elif catalog == '2FGL':
        url = BASE_URL + '2yr_catalog/gll_psc_v08.fit'
    elif catalog == '1FGL':
        url = BASE_URL + '1yr_catalog/gll_psc_v03.fit'
    elif catalog == '1FHL':
        url = BASE_URL + '1FHL/gll_psch_v07.fit'
    elif catalog == '2FHL':
        url = 'https://github.com/gammapy/gammapy-extra/raw/master/datasets/catalogs/fermi/gll_psch_v08.fit.gz'
    elif catalog == '2PC':
        url = BASE_URL + '2nd_PSR_catalog/2PC_catalog_v03.fits'
    else:
        ss = 'Invalid catalog: {0}\n'.format(catalog)
        raise ValueError(ss)

    filename = download_file(url, cache=True)
    hdu_list = fits.open(filename)

    if extension is None:
        return hdu_list

    # TODO: 2FHL doesn't have a 'CLASS1' column, just 'CLASS'
    # It's probably better if we make a `SourceCatalog` class
    # and then sub-class `FermiSourceCatalog` and `Fermi2FHLSourceCatalog`
    # and handle catalog-specific stuff in these classes,
    # trying to provide an as-uniform as possible API to the common catalogs.
    table = Table(hdu_list[extension].data)
    table['IS_GALACTIC'] = [_is_galactic(_) for _ in table['CLASS1']]

    return table
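
Building on the ``IS_GALACTIC`` column the function adds, a short follow-up sketch splitting a catalog:

import numpy as np

cat = fetch_fermi_catalog('3FGL', 'LAT_Point_Source_Catalog')
mask = np.asarray(cat['IS_GALACTIC'], dtype=bool)
print(len(cat[mask]), 'galactic;', len(cat[~mask]), 'extragalactic')
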
Example #6

# (fragment: the code that defines names, seddata, lines, split1, split2,
# end, coords and radii is not shown)
    SEDclasscolumn = Column(data=[seddata[n] if n in seddata else '$E$' for n in names], name="SED Class")
    classification = {x[:split1].strip(): "{0}".format(x[split2:].strip()) for x in lines[1:end]}
    classificationcolumn = Column(data=[classification[n] if n in classification else '-' for n in names], name="Classification")
    footnotes_start = lines.index('Classification Key\n') + 1
    footer = "".join(["{0}{1} \\\\\n"
                      .format("${0}$".format(x[0])
                              if len(x) > 1 and x[1] == ':'
                              else "\\newline" if len(x) <= 1
                              else x[0],
                              x[1:].strip()) for x in
                      lines[footnotes_start:]])

postbl = Table([Column(data=names, name='Source Name'),
                Column(data=coords.ra.to_string(unit=u.hour, sep=':', precision=2), name='RA'),
                Column(data=coords.dec.to_string(unit=u.deg, sep=':', precision=1), name='Dec'),
                Column(data=radii, name='Radius'),
                Column(data=(radii*5.1*u.kpc).to(u.pc, u.dimensionless_angles()), name='Phys. Radius'),
                SEDclasscolumn,
                classificationcolumn,
               ])

import natsort
postbl = postbl[natsort.index_natsorted(postbl['Source Name'])]

latexdict['header_start'] = '\\label{tab:positions}'
latexdict['caption'] = 'Source Positions'
latexdict['tablefoot'] = ('\\par\nObjects with name e\\#d are the diffuse '
                          'counterparts to point sources.  '
                          'The absolute positional accuracy is '
                          '$\\sim0.2\\arcsec$.  '
                          'Sources with no radius are unresolved, '
                          'with '
Example #7

# Imports assumed by this snippet; etc_uh_roboAO is the package's own
# exposure-time-calculator routine.
import math
import numpy as np
from astropy.table import Table, Column

def make_tint_curves(mag=20, spec_res=100, sq_aper_diam=0.30, seeing_limited=False):
    tint = np.arange(300, 3600+1, 300)
    snr_y = np.zeros(len(tint), dtype=float)
    snr_j = np.zeros(len(tint), dtype=float)
    snr_h = np.zeros(len(tint), dtype=float)
    
    snr_sum_y = np.zeros(len(tint), dtype=float)
    snr_sum_j = np.zeros(len(tint), dtype=float)
    snr_sum_h = np.zeros(len(tint), dtype=float)
    
    spec_y_tab = None
    spec_j_tab = None
    spec_h_tab = None

    for tt in range(len(tint)):
        print('Tint: ', tint[tt])
        blah_y = etc_uh_roboAO(mag, 'Y', tint[tt],
                               spec_res=spec_res, sq_aper_diam=sq_aper_diam, 
                               seeing_limited=seeing_limited)
        blah_j = etc_uh_roboAO(mag, 'J', tint[tt],
                               spec_res=spec_res, sq_aper_diam=sq_aper_diam,
                               seeing_limited=seeing_limited)
        blah_h = etc_uh_roboAO(mag, 'H', tint[tt],
                               spec_res=spec_res, sq_aper_diam=sq_aper_diam,
                               seeing_limited=seeing_limited)
        
        col_y_suffix = '_Y_{0:d}'.format(tint[tt])
        col_j_suffix = '_J_{0:d}'.format(tint[tt])
        col_h_suffix = '_H_{0:d}'.format(tint[tt])

        spec_signal_y = Column(name='sig'+col_y_suffix, data=blah_y[4])
        spec_signal_j = Column(name='sig'+col_j_suffix, data=blah_j[4])
        spec_signal_h = Column(name='sig'+col_h_suffix, data=blah_h[4])
        spec_bkg_y = Column(name='bkg'+col_y_suffix, data=blah_y[5])
        spec_bkg_j = Column(name='bkg'+col_j_suffix, data=blah_j[5])
        spec_bkg_h = Column(name='bkg'+col_h_suffix, data=blah_h[5])
        spec_snr_y = Column(name='snr'+col_y_suffix, data=blah_y[6])
        spec_snr_j = Column(name='snr'+col_j_suffix, data=blah_j[6])
        spec_snr_h = Column(name='snr'+col_h_suffix, data=blah_h[6])
        
        if spec_y_tab is None:
            spec_y_tab = Table([blah_y[3]], names=['wave_Y'])
        if spec_j_tab is None:
            spec_j_tab = Table([blah_j[3]], names=['wave_J'])
        if spec_h_tab is None:
            spec_h_tab = Table([blah_h[3]], names=['wave_H'])

        spec_y_tab.add_columns([spec_signal_y, spec_bkg_y, spec_snr_y])
        spec_j_tab.add_columns([spec_signal_j, spec_bkg_j, spec_snr_j])
        spec_h_tab.add_columns([spec_signal_h, spec_bkg_h, spec_snr_h])

        snr_y[tt]  = blah_y[0]
        snr_j[tt]  = blah_j[0]
        snr_h[tt]  = blah_h[0]

        snr_sum_y[tt] = math.sqrt((spec_snr_y**2).sum())
        snr_sum_j[tt] = math.sqrt((spec_snr_j**2).sum())
        snr_sum_h[tt] = math.sqrt((spec_snr_h**2).sum())
        

    avg_tab = Table([tint, 
                     snr_y, snr_sum_y, 
                     snr_j, snr_sum_j,
                     snr_h, snr_sum_h],
                    names=['tint', 
                           'snr_y', 'snr_sum_y', 
                           'snr_j', 'snr_sum_j',
                           'snr_h', 'snr_sum_h'])


    out_file = 'roboAO_tint_m{0:d}_R{1:d}_ap{2:0.3f}'.format(mag, spec_res, sq_aper_diam)

    if seeing_limited:
        out_file += '_seeing'
    
    # Save the tables
    spec_y_tab.write(out_file + '_spec_y_tab.fits', overwrite=True)
    spec_j_tab.write(out_file + '_spec_j_tab.fits', overwrite=True)
    spec_h_tab.write(out_file + '_spec_h_tab.fits', overwrite=True)
    avg_tab.write(out_file + '_avg_tab.fits', overwrite=True)

    return
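
An invocation sketch; the output file names encode the magnitude, spectral resolution and aperture exactly as formatted above (e.g. roboAO_tint_m20_R100_ap0.300_avg_tab.fits).

for m in [19, 20, 21]:
    make_tint_curves(mag=m, spec_res=100, sq_aper_diam=0.30)
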
Example #8
#!/usr/bin/env python
import numpy as np
import sncosmo
from collections import OrderedDict as odict
from astropy.table import Table

model = sncosmo.Model(source='salt2')
model.set(z=0.5, c=0.2, t0=55100., x1=0.5)
model.set_source_peakabsmag(-19.5, 'bessellb', 'ab')

times = np.linspace(55070., 55150., 40)
bands = np.array(10 * ['sdssg', 'sdssr', 'sdssi', 'sdssz'])
zp = 25. * np.ones(40)
zpsys = np.array(40 * ['ab'])

flux = model.bandflux(bands, times, zp=zp, zpsys=zpsys)
fluxerr = (0.05 * np.max(flux)) * np.ones(40, dtype=float)
flux += fluxerr * np.random.randn(40)

data = Table(odict([('time', times), ('band', bands), ('flux', flux),
                    ('fluxerr', fluxerr), ('zp', zp), ('zpsys', zpsys)]),
             meta=dict(zip(model.param_names, model.parameters)))

sncosmo.write_lc(data, 'example_photometric_data.dat')
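
A follow-up sketch: read the simulated photometry back and refit the SALT2 parameters with sncosmo's standard fitting API, keeping the redshift fixed at its true value.

data = sncosmo.read_lc('example_photometric_data.dat')
fit_model = sncosmo.Model(source='salt2')
fit_model.set(z=0.5)  # fix z; fit the remaining parameters
result, fitted_model = sncosmo.fit_lc(data, fit_model, ['t0', 'x0', 'x1', 'c'])
print(result.parameters)  # best-fit values, ordered as fit_model.param_names
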
Example #9

# Imports as typically used with CIGALE; gen_cigale_in() and _initialise()
# are local helpers from the same module.
import os
import warnings
import numpy as np
from collections import OrderedDict
from astropy.table import Table
from pcigale.session.configuration import Configuration
from pcigale.analysis_modules import get_module
from pcigale.data import Database

def run(photometry_table,
        zcol,
        data_file="cigale_in.fits",
        config_file="pcigale.ini",
        wait_for_input=False,
        plot=True,
        outdir='out',
        compare_obs_model=False,
        **kwargs):
    """
    Input parameters and then run CIGALE.

    Args:
        photometry_table (astropy Table):
            A table from some photometric catalog with magnitudes and
            error measurements. Currently supports
            DES, DECaLS, SDSS, Pan-STARRS and WISE
        zcol (str):
            Name of the column with redshift estimates.
        data_file (str, optional):
            Root name for the generated photometry data file used as input to CIGALE
        config_file (str, optional):
            Root name for the file where CIGALE's configuration is generated
        wait_for_input (bool, optional):
            If true, waits for the user to finish editing the auto-generated config file
            before running.
        plot (bool, optional):
            Plots the best fit SED if true
        cores (int, optional):
            Number of CPU cores to be used. Defaults
            to all cores on the system.
        outdir (str, optional):
            Path to the many outputs of CIGALE
            If not supplied, the outputs will appear in a folder named out/
        compare_obs_model (bool, optional):
            If True compare the input observed fluxes with the model fluxes
            This writes a Table to outdir named 'photo_observed_model.dat'

    kwargs:  These are passed into gen_cigale_in() and _initialise()
        sed_modules (list of 'str', optional):
            A list of SED modules to be used in the 
            PDF analysis. If this is being input, there
            should be a corresponding correct dict
            for sed_modules_params.
        sed_module_params (dict, optional):
            A dict containing parameter values for
            the input SED modules. Better not use this
            unless you know exactly what you're doing.

    """
    gen_cigale_in(photometry_table,
                  zcol,
                  infile=data_file,
                  overwrite=True,
                  **kwargs)
    _initialise(data_file, config_file=config_file, **kwargs)
    if wait_for_input:
        input("Edit the generated config file {:s} and press Enter to run.".
              format(config_file))
    cigconf = Configuration(config_file)
    analysis_module = get_module(cigconf.configuration['analysis_method'])
    analysis_module.process(cigconf.configuration)
    if plot:
        try:
            from pcigale_plots import sed  # This modifies the backend to Agg so I hide it here
            old_version = True
            import pcigale
            warnings.warn(
                "You are using CIGALE version {:s}, for which support is deprecated. Please update to 2020.0 or higher."
                .format(pcigale.__version__))
        except ImportError:
            from pcigale_plots.plot_types.sed import sed
            old_version = False

        if old_version:
            sed(cigconf, "mJy", True)
        else:
            # TODO: Let the user customize the plot.
            series = [
                'stellar_attenuated', 'stellar_unattenuated', 'dust', 'agn',
                'model'
            ]
            sed(cigconf, "mJy", True, (False, 1e5), (False, 1e2), series,
                "pdf", "out")
        # Set back to a GUI
        import matplotlib
        matplotlib.use('TkAgg')

    # Rename the default output directory?
    if outdir != 'out':
        try:
            os.system("rm -rf {}".format(outdir))
            os.system("mv out {:s}".format(outdir))
        except:
            print("Invalid output directory path. Output stored in out/")

    # Move input files into outdir too
    os.system("mv {:s} {:s}".format(data_file, outdir))
    os.system("mv {:s} {:s}".format(config_file, outdir))
    os.system("mv {:s}.spec {:s}".format(config_file, outdir))

    # Compare?
    if compare_obs_model:
        #Generate an observation/model flux comparison table.
        with Database() as base:
            filters = OrderedDict([
                (name, base.get_filter(name))
                for name in cigconf.configuration['bands']
                if not (name.endswith('_err') or name.startswith('line'))
            ])
            filters_wl = np.array(
                [filt.pivot_wavelength for filt in filters.values()])
            mods = Table.read(outdir + '/results.fits')

            try:
                obs = Table.read(
                    os.path.join(outdir, cigconf.configuration['data_file']))
            except:
                print(
                    "Something went wrong here. Astropy was unable to read the observations table. Please ensure it is in the fits format."
                )
                return
            for model, obj in zip(mods, obs):
                photo_obs_model = Table()
                photo_obs_model['lambda_filter'] = [
                    wl / 1000 for wl in filters_wl
                ]
                photo_obs_model['model_flux'] = np.array(
                    [model["best." + filt] for filt in filters.keys()])
                photo_obs_model['observed_flux'] = np.array(
                    [obj[filt] for filt in filters.keys()])
                photo_obs_model['observed_flux_err'] = np.array(
                    [obj[filt + '_err'] for filt in filters.keys()])
                photo_obs_model.write(outdir + "/photo_observed_model_" +
                                      str(model['id']) + ".dat",
                                      format="ascii",
                                      overwrite=True)
            #import pdb; pdb.set_trace()

    return
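
A minimal call sketch; ``photom`` stands for any table meeting the requirements above, with redshift estimates in a 'redshift' column.

run(photom, 'redshift', data_file='cigale_in.fits',
    config_file='pcigale.ini', outdir='cigale_out')
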
Example #10

import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.table import Table


def myConv(p, windowsize, sigma=3):
    # NOTE: the def line is reconstructed (and the sigma default assumed)
    # from the body and the myConv(flux, N) calls below.
    gaussian = np.linspace(0, windowsize, windowsize + 1)
    gaussianF = np.power(
        np.e, -np.power(gaussian - windowsize / 2, 2) / (2 * sigma * sigma))
    gaussianF = gaussianF / np.sum(gaussianF)
    postFlux = scipy.signal.convolve(p, gaussianF, 'same')
    #    plt.figure(1)
    #    plt.plot(gaussian,gaussianF,'-o')
    #    plt.show()
    return postFlux


dir = './spec/PHOENIX-ACES-AGSS-COND-2011_Z-0.0/'
filename = 'lte02300-0.00-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits'

hl = fits.open(dir + filename)
spec = Table(hl[1].data)

wave = spec['wave']
flux = spec['flux']

plt.figure(2)
plt1 = plt.plot(wave, flux, '-', label='origin')

flux1 = myConv(flux, 10)
plt2 = plt.plot(wave, flux1, '-', label='w = 10')

flux1 = myConv(flux, 50)
plt3 = plt.plot(wave, flux1, '-', label='w = 50')
plt.xlabel('wavelength')
plt.ylabel('flux')
plt.legend()
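
For comparison (not part of the original), SciPy's ndimage provides equivalent Gaussian smoothing in a single call; the sigma below is illustrative.

from scipy.ndimage import gaussian_filter1d

flux_smooth = gaussian_filter1d(np.asarray(flux, dtype=float), sigma=10)
plt.plot(wave, flux_smooth, '-', label='gaussian_filter1d')
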
Example #11

# Imports assumed by this snippet.
from numpy import log10, power, sqrt, square, logical_not, isfinite
from numpy.random import uniform
from astropy.table import Table

def rdz_flux_from_hdf_cubes(hdfcont,
                            minfrac=0.2,
                            maxfrac=2.0,
                            nperifu=1000,
                            add_flim=False,
                            sncut=6.0,
                            logspaced=False):
    """
    Generate ra, dec, redshift and
    flux from a sensivity cube, distribute
    uniformly in x,y,z cube pixels

    Parameters
    ----------
    hdfcont :  SensitivityCubeHDF5Container
        sensitivity cubes for the shot to simulate
    minfrac, maxfrac : float
        the minimum and maximum fraction
        of the flux limit used to define 
        a range of fluxes
    nperifu : int (optional)
        number of sources per IFU
    sncut : float
        the cut on SNR to pass to the flux
        limits API if add_flim=True
    """

    ra = []
    dec = []
    wave = []
    flux = []
    flims = []
    ifuslots = []
    tables = []
    ifu_ra = []
    ifu_dec = []

    for ifuslot, scube in hdfcont.itercubes():

        #if not "086" in ifuslot:
        #    continue

        # Generate randoms in pixel space then transform
        xsize = scube.sigmas.shape[2]
        ysize = scube.sigmas.shape[1]
        zsize = scube.sigmas.shape[0]
        # Pixel centers run from 0.0 to size - 1
        x = uniform(-0.5, xsize + 0.5, size=nperifu)
        y = uniform(-0.5, ysize + 0.5, size=nperifu)
        # z is a pixel coordinate here, not redshift!
        z = uniform(0, zsize - 1, size=nperifu)
        r, d, l = scube.wcs.all_pix2world(x, y, z, 0)

        if add_flim:
            flim = scube.get_f50(r, d, l, sncut)
            flim[flim < 1e-30] = 100
            flim[logical_not(isfinite(flim))] = 100
            flims.extend(flim)
        else:
            # fixed value to generate fluxes around
            flim = 5e-17

        if logspaced:
            logf = uniform(log10(flim * minfrac),
                           log10(flim * maxfrac),
                           size=nperifu)
            f = power(10, logf)
        else:
            sqrtf = uniform(sqrt(flim * minfrac),
                            sqrt(flim * maxfrac),
                            size=nperifu)
            f = square(sqrtf)

        ra.extend(r)
        dec.extend(d)
        wave.extend(l)
        flux.extend(f)
        ifuslots.extend([ifuslot] * nperifu)
        ifra, ifdec, l = scube.wcs.all_pix2world(xsize / 2 - 0.5,
                                                 ysize / 2. - 0.5, 0, 0)
        ifu_ra.extend([ifra] * nperifu)
        ifu_dec.extend([ifdec] * nperifu)
        print("{:s} {:f} {:f}".format(ifuslot, ifra, ifdec))

    table = Table(
        [ra, dec, wave, flux, ifuslots, ifu_ra, ifu_dec],
        names=["ra", "dec", "wave", "flux", "ifuslot", "ifu_ra", "ifu_dec"])

    if add_flim:
        table["flim"] = flims

    print("Generated {:d} sources".format(len(ra)))

    return table
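
A usage sketch; the SensitivityCubeHDF5Container import path and the HDF5 filename are assumptions based on the HETDEX flux-limit tools.

from hetdex_api.flux_limits.hdf5_sensitivity_cubes import SensitivityCubeHDF5Container

hdfcont = SensitivityCubeHDF5Container('sensitivity_cube.h5')   # placeholder file
sources = rdz_flux_from_hdf_cubes(hdfcont, nperifu=500, add_flim=True)
sources.write('mock_sources.fits', overwrite=True)
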
Example #12

def getdata(refcat, minra, redo=False, silent=False, logger=None):
    """
    Get reference catalog information from the DL database.

    Parameters
    ----------
    refcat : str
       Reference catalog name (e.g. 2MASS, Gaia, etc.)
    minra : float
       Minimum RA of the 1-degree strip to query, in degrees.
    redo : bool, optional
       Re-run the query even if the output file already exists. Default is False.
    silent : bool, optional
       Don't print anything to the screen.
    logger : logging object, optional
       Logging object to use for printing messages.

    Returns
    -------
    ref : astropy table
        Search results from the reference catalog.

    Example
    -------

    ref = getdata(refcat, minra)

    By D. Nidever  Sep 2017
    Translated to Python by D. Nidever, April 2022
    """

    count = 0
    t0 = time.time()

    if logger is None:
        logger = dln.basiclogger()

    # Check that we have psql installed
    out = subprocess.check_output(['which', 'psql'], shell=False)
    if type(out) is bytes:
        out = out.decode()
    out = out.strip()
    if dln.size(out) > 1:
        out = out[0]
    if os.path.exists(out) == 0:
        raise ValueError('No PSQL found on this system.')

    # Temporary directory
    # /tmp is often small and can get filled up
    dldir, mssdir, localdir = utils.rootdirs()

    # FLIP THIS AROUND, INPUT SHOULD BE THE "EASY" VERSION!!!
    refname = str(refcat).upper()
    if refname == 'II/312/AIS':
        refname = 'GALEX'
    elif refname == '2MASS-PSC':
        refname = 'TMASS'
    elif refname == '2MASS':
        refname = 'TMASS'
    elif refname == 'GAIA/GAIA':
        refname = 'GAIA'
    elif refname == 'Skymapper':
        refname = 'SKYMAPPER'
    elif refname == 'GLIMPSE':
        catname = 'II/293/glimpse'
    elif refname == 'SAGE':
        catname = 'II/305/archive'
    elif refname == 'ATLASREFCAT2':
        refname = 'ATLAS'

    ra0 = float(minra)
    ra1 = minra + 1.0

    outdir = '/net/dl1/users/dnidever/nsc/refcatalogs/' + refname + '/'
    if os.path.exists(outdir) == False:
        os.makedirs(outdir)
    savefile = outdir + 'ref_%.6f_%.6f_%s.fits' % (ra0, ra1, refname)
    if os.path.exists(os.path.abspath(os.path.dirname(savefile))) == False:
        os.makedirs(os.path.abspath(os.path.dirname(savefile)))

    if silent == False:
        logger.info('Querying %s: %.6f <= RA < %.6f' % (refname, ra0, ra1))

    # Loading previously loaded file
    if os.path.exists(savefile) and redo == False:
        logger.info(savefile + ' already exists and redo==False')
        return None

    # Do the Query
    #--------------
    else:

        # Use DataLab database search
        #----------------------------
        if refname in [
                'TMASS', 'GAIA', 'GAIADR2', 'GAIAEDR3', 'PS', 'SKYMAPPER',
                'SKYMAPPERDR2', 'ALLWISE', 'ATLAS'
        ]:
            if refname == 'TMASS':
                tablename = 'twomass.psc'
                cols = 'designation,ra as raj2000,dec as dej2000,j_m as jmag,j_cmsig as e_jmag,h_m as hmag,h_cmsig as e_hmag,k_m as kmag,k_cmsig as e_kmag,ph_qual as qflg'
                ##server = 'gp04.datalab.noao.edu'
                #server = 'gp01.datalab.noirlab.edu'
                ##server = 'dldb1.sdm.noao.edu'
                server = 'db02.datalab.noirlab.edu'
                user = '******'
            racol = 'ra'
            deccol = 'dec'
            if refname == 'GAIA':
                tablename = 'gaia_dr1.gaia_source'
                cols = 'source_id as source,ra as ra_icrs,ra_error as e_ra_icrs,dec as de_icrs,dec_error as e_de_icrs,'
                cols += 'phot_g_mean_flux as fg,phot_g_mean_flux_error as e_fg,phot_g_mean_mag as gmag'
                #server = 'gp04.datalab.noirlab.edu'
                ##server = 'gp01.datalab.noao.edu'
                ##server = 'dldb1.sdm.noao.edu'
                server = 'db02.datalab.noirlab.edu'
                user = '******'
            if refname == 'GAIADR2':
                tablename = 'gaia_dr2.gaia_source'
                cols = 'source_id as source,ra,ra_error,dec,dec_error,pmra,pmra_error,pmdec,pmdec_error,phot_g_mean_flux as fg,phot_g_mean_flux_error as e_fg,'
                cols += 'phot_g_mean_mag as gmag,phot_bp_mean_mag as bp,phot_bp_mean_flux as fbp,phot_bp_mean_flux_error as e_fbp,'
                cols += 'phot_rp_mean_mag as rp,phot_rp_mean_flux as frp,phot_rp_mean_flux_error as e_frp'
                #server = 'gp04.datalab.noirlab.edu'
                ##server = 'gp01.datalab.noao.edu'
                server = 'db02.datalab.noirlab.edu'
                user = '******'
            if refname == 'GAIAEDR3':
                tablename = 'gaia_edr3.gaia_source'
                cols = 'source_id as source,ra,ra_error,dec,dec_error,pmra,pmra_error,pmdec,pmdec_error,phot_g_mean_flux as fg,phot_g_mean_flux_error as e_fg,'
                cols += 'phot_g_mean_mag as gmag,phot_bp_mean_mag as bp,phot_bp_mean_flux as fbp,phot_bp_mean_flux_error as e_fbp,'
                cols += 'phot_rp_mean_mag as rp,phot_rp_mean_flux as frp,phot_rp_mean_flux_error as e_frp'
                #server = 'gp04.datalab.noirlab.edu'
                ##server = 'gp01.datalab.noao.edu'
                server = 'db02.datalab.noirlab.edu'
                user = '******'
            if refname == 'PS':
                #tablename = 'cp_calib.ps1'
                tablename = 'public.ps1'
                cols = 'ra, dec, g as gmag, r as rmag, i as imag, z as zmag, y as ymag'
                ##server = 'gp02.datalab.noirlab.edu'
                #server = 'gp01.datalab.noirlab.edu'
                server = 'db02.datalab.noirlab.edu'
                user = '******'
            if refname == 'SKYMAPPER':
                tablename = 'skymapper_dr1.master'
                cols = 'raj2000 as ra, dej2000 as dec, u_psf as sm_umag, e_u_psf as e_sm_umag, g_psf as sm_gmag, e_g_psf as e_sm_gmag, r_psf as sm_rmag,'
                cols += 'e_r_psf as e_sm_rmag, i_psf as sm_imag,e_i_psf as e_sm_imag, z_psf as sm_zmag, e_z_psf as e_sm_zmag'
                #server = 'gp04.datalab.noirlab.edu'
                ##server = 'gp01.datalab.noao.edu'
                server = 'db02.datalab.noirlab.edu'
                user = '******'
                racol = 'raj2000'
                deccol = 'dej2000'
            if refname == 'SKYMAPPERDR2':
                tablename = 'skymapper_dr2.master'
                cols = 'raj2000 as ra, dej2000 as dec, u_psf as sm_umag, e_u_psf as e_sm_umag, g_psf as sm_gmag, e_g_psf as e_sm_gmag, r_psf as sm_rmag,'
                cols += 'e_r_psf as e_sm_rmag, i_psf as sm_imag,e_i_psf as e_sm_imag, z_psf as sm_zmag, e_z_psf as e_sm_zmag'
                #server = 'gp04.datalab.noirlab.edu'
                ##server = 'gp01.datalab.noao.edu'
                server = 'db02.datalab.noirlab.edu'
                user = '******'
                racol = 'raj2000'
                deccol = 'dej2000'
            if refname == 'ALLWISE':
                tablename = 'allwise.source'
                #cols = 'ra, dec, w1mdef as w1mag, w1sigmdef as e_w1mag, w2mdef as w2mag, w2sigmdef as e_w2mag'
                cols = 'ra, dec, w1mpro as w1mag, w1sigmpro as e_w1mag, w2mpro as w2mag, w2sigmpro as e_w2mag'
                #server = 'gp04.datalab.noao.edu'
                #server = 'gp01.datalab.noirlab.edu'
                server = 'db02.datalab.noirlab.edu'
                user = '******'
            if refname == 'ATLAS':
                tablename = 'atlasrefcat2'
                cols = 'objid,ra,dec,plx as parallax,dplx as parallax_error,pmra,dpmra as pmra_error,pmdec,dpmdec as pmdec_error,gaia,dgaia as gaiaerr,'
                cols += 'bp,dbp as bperr,rp,drp as rperr,teff,agaia,dupvar,ag,rp1,r1,r10,g as gmag,dg as gerr,gchi,gcontrib,'
                cols += 'r as rmag, dr as rerr,rchi,rcontrib,i as imag,di as ierr,ichi,icontrib,z as zmag,dz as zerr,zchi,zcontrib,nstat,'
                cols += 'j as jmag,dj as jerr,h as hmag,dh as herr,k as kmag,dk as kerr'
                server = 'gp10.datalab.noirlab.edu'
                user = '******'

            # Use Postgres command with q3c cone search
            refcattemp = savefile.replace('.fits', '.txt')
            cmd = "psql -h " + server + " -U " + user + " -d tapdb -w --pset footer -c 'SELECT " + cols + " FROM " + tablename
            if refname == 'SKYMAPPER' or refname == 'SKYMAPPERDR2':
                cmd += " WHERE raj2000 >= %.6f and raj2000 < %.6f'" % (ra0,
                                                                       ra1)
            else:
                cmd += " WHERE ra >= %.6f and ra < %.6f'" % (ra0, ra1)
            cmd += " > " + refcattemp
            dln.remove(refcattemp, allow=True)
            dln.remove(savefile, allow=True)
            out = subprocess.check_output(cmd, shell=True)
            # Check for empty query
            tlines = dln.readlines(refcattemp, nreadline=4)
            if len(tlines) < 4:
                if silent == False:
                    logger.info('No Results')
                return []
            #  Load ASCII file and create the FITS file
            ref = ascii.read(refcattemp, data_start=3, delimiter='|')
            #ref = importascii(refcattemp,/header,delim='|',skipline=2,/silent)
            dln.remove(refcattemp, allow=True)

            # Fix 0.0 mags/errs in ATLAS
            if refname == 'ATLAS':
                magcols = [
                    'gaia', 'bp', 'rp', 'gmag', 'rmag', 'imag', 'zmag', 'jmag',
                    'hmag', 'kmag'
                ]
                errcols = [
                    'gaiaerr', 'bperr', 'rperr', 'gerr', 'rerr', 'ierr',
                    'zerr', 'jerr', 'herr', 'kerr'
                ]
                cols = ref.colnames
                # Set mags with 0.0 to 99.99
                for j in range(len(magcols)):
                    if magcols[j] in ref.colnames:
                        bdmag = (ref[magcols[j]] <= 0.0)
                        if np.sum(bdmag) > 0:
                            ref[magcols[j]][bdmag] = 99.99
                # Set errors with 0.0 to 9.99
                for j in range(len(errcols)):
                    if errcols[j] in ref.colnames:
                        bderr = (ref[errcols[j]] <= 0.0)
                        if np.sum(bderr) > 0:
                            ref[errcols[j]][bderr] = 9.99

        # Use astroquery vizier
        #----------------
        #   for low density with 2MASS/GAIA and always for GALEX and APASS
        else:

            # Use QUERYVIZIER for GALEX (python code has problems)
            #if refname == 'II/312/ais' or refname == 'GALEX':
            # if refcat eq 'APASS' then cfa=0 else cfa=1  ; cfa doesn't have APASS
            #cfa = 1   # problems with CDS VizieR and cfa has APASS now
            #if refcat == 'SAGE':
            #    cfa = 0

            if refname.upper() == 'GALEX':
                cols = [
                    'RAJ2000', 'DEJ2000', 'FUVmag', 'e_FUVmag', 'NUVmag',
                    'e_NUVmag'
                ]
                catname = 'II/335/galex_ais'
            elif refname.upper() == 'GLIMPSE':
                # Only includes GLIMPSE I,II,3D
                cols = [
                    'RAJ2000', 'DEJ2000', '_2MASS', '_3.6mag', 'e_3.6mag',
                    '_4.5mag', 'e_4.5mag'
                ]
                catname = 'II/293/glimpse'
            elif refname.upper() == 'SAGE':
                cols = [
                    'RAJ2000', 'DEJ2000', '__3.6_', 'e__3.6_', '__4.5_',
                    'e__4.5_'
                ]
                catname = 'II/305/catalog'

            Vizier.ROW_LIMIT = -1
            Vizier.TIMEOUT = 1000000
            Vizier.columns = cols
            result = Vizier.query_constraints(catalog=catname,
                                              RA='>=' + str(ra0) + ' & <' +
                                              str(ra1))

            # Check for failure
            if len(result) == 0:
                if silent == False:
                    logger.info('Failure or No Results')
                return []
            ref = result[0]
            ref.meta['description'] = ref.meta['description'][0:50]
            #ref = QUERYVIZIER(refname,[cenra,cendec],radius*60,cfa=cfa,timeout=600,/silent)

            # Fix/homogenize the GAIA tags
            if refname == 'GAIA':
                nref = len(ref)
                orig = ref.copy()
                dt = [('source', int), ('ra_icrs', float),
                      ('e_ra_icrs', float), ('de_icrs', float),
                      ('e_de_icrs', float), ('fg', float), ('e_fg', float),
                      ('gmag', float)]
                ref = np.zeros(nref, dtype=np.dtype(dt))
                ref = Table(ref)
                for n in orig.colnames:
                    ref[n] = orig[n]
                ref['fg'] = orig['_fg_']
                ref['e_fg'] = orig['e__fg_']
                ref['gmag'] = orig['_gmag_']
                del orig
            # Fix/homogenize the 2MASS tags
            elif refname == 'TMASS':
                nref = len(ref)
                orig = ref.copy()
                dt = [('designation', (str, 50)), ('raj2000', float),
                      ('dej2000', float), ('jmag', float), ('e_jmag', float),
                      ('hmag', float), ('e_hmag', float), ('kmag', float),
                      ('e_kmag', float), ('qflg', (str, 20))]
                ref = np.zeros(nref, dtype=np.dtype(dt))
                ref = Table(ref)  # keep a Table, matching the GAIA branch above
                for n in orig.colnames:
                    ref[n] = orig[n]
                ref['designation'] = orig['_2mass']
                del orig
            # Fix NANs in ALLWISE
            elif refname == 'ALLWISE':
                bd, = np.where(np.isfinite(ref['_3_6_']) == False)
                if len(bd) > 0:
                    ref['_3_6_'][bd] = 99.99
                    ref['e__3_6_'][bd] = 9.99
                bd, = np.where(np.isfinite(ref['_4_5_']) == False)
                if len(bd) > 0:
                    ref['_4_5_'][bd] = 99.99
                    ref['e__4_5_'][bd] = 9.99

        # Convert all mags and errmags to float32
        for n in ref.colnames:
            if n.find('mag') > -1:
                ref[n] = ref[n].astype(np.float32)
            if n.find('e_') > -1 and n.find('mag') > -1:
                ref[n] = ref[n].astype(np.float32)
        # Lowercase column names
        for n in ref.colnames:
            ref[n].name = n.lower()
        # Convert raj2000/dej2000 to ra/dec
        if 'raj2000' in ref.colnames:
            ref['raj2000'].name = 'ra'
        if 'dej2000' in ref.colnames:
            ref['dej2000'].name = 'dec'

        # Save the file
        logger.info('Saving catalog to file ' + savefile)
        ref.write(savefile, overwrite=True)

    if silent == False:
        logger.info('%d sources found   dt=%.1f sec.' %
                    (len(ref), time.time() - t0))

    return ref
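
A call sketch; each invocation queries a single 1-degree strip in RA, since the function sets ra1 = minra + 1.0.

ref = getdata('GAIADR2', 150.0)   # sources with 150.0 <= RA < 151.0
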
Example #13
    def run_cigale(self,
                   data_file="cigale_in.fits",
                   config_file="pcigale.ini",
                   wait_for_input=False,
                   plot=True,
                   outdir='out',
                   compare_obs_model=False,
                   **kwargs):
        """
        Generates the input data file for CIGALE
        given the photometric points and redshift
        of a galaxy

        Args:
            ID: str, optional
                An ID for the galaxy. If none, "GalaxyA" is assigned.
            data_file (str, optional):
                Root name for the photometry data file generated used as input to CIGALE
            config_file (str, optional):
                Root name for the file where CIGALE's configuration is generated
            wait_for_input (bool, optional):
                If true, waits for the user to finish editing the auto-generated config file
                before running.
            plot (bool, optional):
                Plots the best fit SED if true
            cores (int, optional):
                Number of CPU cores to be used. Defaults
                to all cores on the system.
            outdir (str, optional):
                Path to the many outputs of CIGALE
                If not supplied, the outputs will appear in a folder named out/
            compare_obs_model (bool, optional):
                If True compare the input observed fluxes with the model fluxes
                This writes a Table to outdir named 'photo_observed_model.dat'

        kwargs:  These are passed into gen_cigale_in() and _initialise()
            sed_modules (list of 'str', optional):
                A list of SED modules to be used in the 
                PDF analysis. If this is being input, there
                should be a corresponding correct dict
                for sed_modules_params.
            sed_module_params (dict, optional):
                A dict containing parameter values for
                the input SED modules. Better not use this
                unless you know exactly what you're doing.
        """
        # Adding import statement here in case CIGALE is
        # not installed.
        from .cigale import run

        assert (len(self.photom) >
                0), "No photometry found. CIGALE cannot be run."
        assert (len(self.redshift) >
                0), "No redshift found. CIGALE cannot be run"
        new_photom = Table([self.photom])
        new_photom['redshift'] = self.z
        if self.name != '':
            new_photom['ID'] = self.name
        else:
            new_photom['ID'] = 'FRBGalaxy'

        run(new_photom, 'redshift', data_file, config_file, wait_for_input,
            plot, outdir, compare_obs_model, **kwargs)
        return
Example #14
def main(args):
    """ finds the best models of all standard stars in the frame
    and normlize the model flux. Output is written to a file and will be called for calibration.
    """

    log = get_logger()

    log.info("mag delta %s = %f (for the pre-selection of stellar models)" %
             (args.color, args.delta_color))
    log.info('multiprocess parallelizing with {} processes'.format(args.ncpu))

    # READ DATA
    ############################################
    # First loop through and group by exposure and spectrograph
    frames_by_expid = {}
    for filename in args.frames:
        log.info("reading %s" % filename)
        frame = io.read_frame(filename)
        expid = safe_read_key(frame.meta, "EXPID")
        camera = safe_read_key(frame.meta, "CAMERA").strip().lower()
        spec = camera[1]
        uniq_key = (expid, spec)
        if uniq_key in frames_by_expid.keys():
            frames_by_expid[uniq_key][camera] = frame
        else:
            frames_by_expid[uniq_key] = {camera: frame}

    frames = {}
    flats = {}
    skies = {}

    spectrograph = None
    starfibers = None
    starindices = None
    fibermap = None

    # For each unique expid,spec pair, get the logical OR of the FIBERSTATUS for all
    # cameras and then proceed with extracting the frame information
    # once we modify the fibermap FIBERSTATUS
    for (expid, spec), camdict in frames_by_expid.items():

        fiberstatus = None
        for frame in camdict.values():
            if fiberstatus is None:
                fiberstatus = frame.fibermap['FIBERSTATUS'].data.copy()
            else:
                fiberstatus |= frame.fibermap['FIBERSTATUS']

        for camera, frame in camdict.items():
            frame.fibermap['FIBERSTATUS'] |= fiberstatus
            # Set fibermask flagged spectra to have 0 flux and variance
            frame = get_fiberbitmasked_frame(frame,
                                             bitmask='stdstars',
                                             ivar_framemask=True)
            frame_fibermap = frame.fibermap
            frame_starindices = np.where(isStdStar(frame_fibermap))[0]

            #- Confirm that all fluxes have entries but trust targeting bits
            #- to get basic magnitude range correct
            keep = np.ones(len(frame_starindices), dtype=bool)

            for colname in ['FLUX_G', 'FLUX_R', 'FLUX_Z']:  #- and W1 and W2?
                keep &= frame_fibermap[colname][frame_starindices] > 10**(
                    (22.5 - 30) / 2.5)
                keep &= frame_fibermap[colname][frame_starindices] < 10**(
                    (22.5 - 0) / 2.5)

            frame_starindices = frame_starindices[keep]

            if spectrograph is None:
                spectrograph = frame.spectrograph
                fibermap = frame_fibermap
                starindices = frame_starindices
                starfibers = fibermap["FIBER"][starindices]

            elif spectrograph != frame.spectrograph:
                log.error("incompatible spectrographs %d != %d" %
                          (spectrograph, frame.spectrograph))
                raise ValueError("incompatible spectrographs %d != %d" %
                                 (spectrograph, frame.spectrograph))
            elif starindices.size != frame_starindices.size or np.sum(
                    starindices != frame_starindices) > 0:
                log.error("incompatible fibermap")
                raise ValueError("incompatible fibermap")

            if not camera in frames:
                frames[camera] = []

            frames[camera].append(frame)

    # possibly cleanup memory
    del frames_by_expid

    for filename in args.skymodels:
        log.info("reading %s" % filename)
        sky = io.read_sky(filename)
        camera = safe_read_key(sky.header, "CAMERA").strip().lower()
        if not camera in skies:
            skies[camera] = []
        skies[camera].append(sky)

    for filename in args.fiberflats:
        log.info("reading %s" % filename)
        flat = io.read_fiberflat(filename)
        camera = safe_read_key(flat.header, "CAMERA").strip().lower()

        # NEED TO ADD MORE CHECKS
        if camera in flats:
            log.warning(
                "cannot handle several flats of same camera (%s), will use only the first one"
                % camera)
            #raise ValueError("cannot handle several flats of same camera (%s)"%camera)
        else:
            flats[camera] = flat

    if starindices.size == 0:
        log.error("no STD star found in fibermap")
        raise ValueError("no STD star found in fibermap")

    log.info("found %d STD stars" % starindices.size)

    log.warning("Not using flux errors for Standard Star fits!")

    # DIVIDE FLAT AND SUBTRACT SKY , TRIM DATA
    ############################################
    # since we pop from the dict, copy the keys to iterate over
    # to avoid a RuntimeError from changing the dict during iteration
    frame_cams = list(frames.keys())
    for cam in frame_cams:

        if not cam in skies:
            log.warning("Missing sky for %s" % cam)
            frames.pop(cam)
            continue
        if not cam in flats:
            log.warning("Missing flat for %s" % cam)
            frames.pop(cam)
            continue

        flat = flats[cam]
        for frame, sky in zip(frames[cam], skies[cam]):
            frame.flux = frame.flux[starindices]
            frame.ivar = frame.ivar[starindices]
            frame.ivar *= (frame.mask[starindices] == 0)
            frame.ivar *= (sky.ivar[starindices] != 0)
            frame.ivar *= (sky.mask[starindices] == 0)
            frame.ivar *= (flat.ivar[starindices] != 0)
            frame.ivar *= (flat.mask[starindices] == 0)
            frame.flux *= (frame.ivar > 0)  # just for clean plots
            for star in range(frame.flux.shape[0]):
                ok = np.where((frame.ivar[star] > 0)
                              & (flat.fiberflat[star] != 0))[0]
                if ok.size > 0:
                    frame.flux[star] = frame.flux[star] / flat.fiberflat[
                        star] - sky.flux[star]
            frame.resolution_data = frame.resolution_data[starindices]

    # CHECK S/N
    ############################################
    # for each band in 'brz', record quadratic sum of median S/N across wavelength
    snr = dict()
    for band in ['b', 'r', 'z']:
        snr[band] = np.zeros(starindices.size)
    for cam in frames:
        band = cam[0].lower()
        for frame in frames[cam]:
            msnr = np.median(frame.flux * np.sqrt(frame.ivar) /
                             np.sqrt(np.gradient(frame.wave)),
                             axis=1)  # median S/N per sqrt(Angstrom)
            msnr *= (msnr > 0)
            snr[band] = np.sqrt(snr[band]**2 + msnr**2)
    log.info("SNR(B) = {}".format(snr['b']))

    ###############################
    max_number_of_stars = 50
    min_blue_snr = 4.
    ###############################
    indices = np.argsort(snr['b'])[::-1][:max_number_of_stars]

    validstars = np.where(snr['b'][indices] > min_blue_snr)[0]

    #- TODO: later we filter on models based upon color, thus throwing
    #- away very blue stars for which we don't have good models.

    log.info("Number of stars with median stacked blue S/N > {} /sqrt(A) = {}".
             format(min_blue_snr, validstars.size))
    if validstars.size == 0:
        log.error("No valid star")
        sys.exit(12)

    validstars = indices[validstars]

    for band in ['b', 'r', 'z']:
        snr[band] = snr[band][validstars]

    log.info("BLUE SNR of selected stars={}".format(snr['b']))

    for cam in frames:
        for frame in frames[cam]:
            frame.flux = frame.flux[validstars]
            frame.ivar = frame.ivar[validstars]
            frame.resolution_data = frame.resolution_data[validstars]
    starindices = starindices[validstars]
    starfibers = starfibers[validstars]
    nstars = starindices.size
    fibermap = Table(fibermap[starindices])

    # MASK OUT THROUGHPUT DIP REGION
    ############################################
    mask_throughput_dip_region = True
    if mask_throughput_dip_region:
        wmin = 4300.
        wmax = 4500.
        log.warning(
            "Masking out the wavelength region [{},{}]A in the standard star fit"
            .format(wmin, wmax))
        for cam in frames:
            for frame in frames[cam]:
                ii = np.where((frame.wave >= wmin) & (frame.wave <= wmax))[0]
                if ii.size > 0:
                    frame.ivar[:, ii] = 0

    # READ MODELS
    ############################################
    log.info("reading star models in %s" % args.starmodels)
    stdwave, stdflux, templateid, teff, logg, feh = io.read_stdstar_templates(
        args.starmodels)

    # COMPUTE MAGS OF MODELS FOR EACH STD STAR MAG
    ############################################

    #- Support older fibermaps
    if 'PHOTSYS' not in fibermap.colnames:
        log.warning('Old fibermap format; using defaults for missing columns')
        log.warning("    PHOTSYS = 'S'")
        log.warning("    MW_TRANSMISSION_G/R/Z = 1.0")
        log.warning("    EBV = 0.0")
        fibermap['PHOTSYS'] = 'S'
        fibermap['MW_TRANSMISSION_G'] = 1.0
        fibermap['MW_TRANSMISSION_R'] = 1.0
        fibermap['MW_TRANSMISSION_Z'] = 1.0
        fibermap['EBV'] = 0.0

    model_filters = dict()
    for band in ["G", "R", "Z"]:
        for photsys in np.unique(fibermap['PHOTSYS']):
            model_filters[band + photsys] = load_legacy_survey_filter(
                band=band, photsys=photsys)

    log.info("computing model mags for %s" % sorted(model_filters.keys()))
    model_mags = dict()
    fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom
    for filter_name, filter_response in model_filters.items():
        model_mags[filter_name] = filter_response.get_ab_magnitude(
            stdflux * fluxunits, stdwave)
    log.info("done computing model mags")

    # LOOP ON STARS TO FIND BEST MODEL
    ############################################
    linear_coefficients = np.zeros((nstars, stdflux.shape[0]))
    chi2dof = np.zeros((nstars))
    redshift = np.zeros((nstars))
    normflux = []

    star_mags = dict()
    star_unextincted_mags = dict()
    for band in ['G', 'R', 'Z']:
        star_mags[band] = 22.5 - 2.5 * np.log10(fibermap['FLUX_' + band])
        star_unextincted_mags[band] = 22.5 - 2.5 * np.log10(
            fibermap['FLUX_' + band] / fibermap['MW_TRANSMISSION_' + band])

    star_colors = dict()
    star_colors['G-R'] = star_mags['G'] - star_mags['R']
    star_colors['R-Z'] = star_mags['R'] - star_mags['Z']

    star_unextincted_colors = dict()
    star_unextincted_colors[
        'G-R'] = star_unextincted_mags['G'] - star_unextincted_mags['R']
    star_unextincted_colors[
        'R-Z'] = star_unextincted_mags['R'] - star_unextincted_mags['Z']

    fitted_model_colors = np.zeros(nstars)

    for star in range(nstars):

        log.info("finding best model for observed star #%d" % star)

        # np.array of wave,flux,ivar,resol
        wave = {}
        flux = {}
        ivar = {}
        resolution_data = {}
        for camera in frames:
            for i, frame in enumerate(frames[camera]):
                identifier = "%s-%d" % (camera, i)
                wave[identifier] = frame.wave
                flux[identifier] = frame.flux[star]
                ivar[identifier] = frame.ivar[star]
                resolution_data[identifier] = frame.resolution_data[star]

        # preselect models based on magnitudes
        photsys = fibermap['PHOTSYS'][star]
        if args.color not in ['G-R', 'R-Z']:
            raise ValueError('Unknown color {}'.format(args.color))
        bands = args.color.split("-")
        model_colors = model_mags[bands[0] + photsys] - model_mags[bands[1] +
                                                                   photsys]

        color_diff = model_colors - star_unextincted_colors[args.color][star]
        selection = np.abs(color_diff) < args.delta_color
        if np.sum(selection) == 0:
            log.warning("no model in the selected color range for this star")
            continue

        # smallest cube in parameter space including this selection (needed for interpolation)
        new_selection = (teff >= np.min(teff[selection])) & (teff <= np.max(
            teff[selection]))
        new_selection &= (logg >= np.min(logg[selection])) & (logg <= np.max(
            logg[selection]))
        new_selection &= (feh >= np.min(feh[selection])) & (feh <= np.max(
            feh[selection]))
        selection = np.where(new_selection)[0]

        log.info(
            "star#%d fiber #%d, %s = %f, number of pre-selected models = %d/%d"
            % (star, starfibers[star], args.color,
               star_unextincted_colors[args.color][star], selection.size,
               stdflux.shape[0]))

        # Match unextincted standard stars to data
        coefficients, redshift[star], chi2dof[star] = match_templates(
            wave,
            flux,
            ivar,
            resolution_data,
            stdwave,
            stdflux[selection],
            teff[selection],
            logg[selection],
            feh[selection],
            ncpu=args.ncpu,
            z_max=args.z_max,
            z_res=args.z_res,
            template_error=args.template_error)

        linear_coefficients[star, selection] = coefficients

        log.info(
            'Star Fiber: {}; TEFF: {:.3f}; LOGG: {:.3f}; FEH: {:.3f}; Redshift: {:g}; Chisq/dof: {:.3f}'
            .format(starfibers[star], np.inner(teff,
                                               linear_coefficients[star]),
                    np.inner(logg, linear_coefficients[star]),
                    np.inner(feh, linear_coefficients[star]), redshift[star],
                    chi2dof[star]))

        # Apply redshift to original spectrum at full resolution
        model = np.zeros(stdwave.size)
        redshifted_stdwave = stdwave * (1 + redshift[star])
        for i, c in enumerate(linear_coefficients[star]):
            if c != 0:
                model += c * np.interp(stdwave, redshifted_stdwave, stdflux[i])

        # Apply dust extinction to the model
        log.info("Applying MW dust extinction to star {} with EBV = {}".format(
            star, fibermap['EBV'][star]))
        model *= dust_transmission(stdwave, fibermap['EBV'][star])

        # Compute final color of dust-extincted model
        photsys = fibermap['PHOTSYS'][star]
        if args.color not in ['G-R', 'R-Z']:
            raise ValueError('Unknown color {}'.format(args.color))
        bands = args.color.split("-")
        model_mag1 = model_filters[bands[0] + photsys].get_ab_magnitude(
            model * fluxunits, stdwave)
        model_mag2 = model_filters[bands[1] + photsys].get_ab_magnitude(
            model * fluxunits, stdwave)
        fitted_model_colors[star] = model_mag1 - model_mag2
        if bands[0] == "R":
            model_magr = model_mag1
        elif bands[1] == "R":
            model_magr = model_mag2

        #- TODO: move this back into normalize_templates, at the cost of
        #- recalculating a model magnitude?

        # Normalize the best model using reported magnitude
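        # m_model - m_star = -2.5*log10(f_model/f_star), so multiplying the
        # model flux by 10**((m_model - m_star)/2.5) matches it to the
        # observed R-band magnitude.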
        scalefac = 10**((model_magr - star_mags['R'][star]) / 2.5)

        log.info('scaling R mag {:.3f} to {:.3f} using scale {}'.format(
            model_magr, star_mags['R'][star], scalefac))
        normflux.append(model * scalefac)

    # Now write the normalized flux for all best models to a file
    normflux = np.array(normflux)

    fitted_stars = np.where(chi2dof != 0)[0]
    if fitted_stars.size == 0:
        log.error("No star has been fit.")
        sys.exit(12)

    data = {}
    data['LOGG'] = linear_coefficients[fitted_stars, :].dot(logg)
    data['TEFF'] = linear_coefficients[fitted_stars, :].dot(teff)
    data['FEH'] = linear_coefficients[fitted_stars, :].dot(feh)
    data['CHI2DOF'] = chi2dof[fitted_stars]
    data['REDSHIFT'] = redshift[fitted_stars]
    data['COEFF'] = linear_coefficients[fitted_stars, :]
    data['DATA_%s' % args.color] = star_colors[args.color][fitted_stars]
    data['MODEL_%s' % args.color] = fitted_model_colors[fitted_stars]
    data['BLUE_SNR'] = snr['b'][fitted_stars]
    data['RED_SNR'] = snr['r'][fitted_stars]
    data['NIR_SNR'] = snr['z'][fitted_stars]
    io.write_stdstar_models(args.outfile, normflux, stdwave,
                            starfibers[fitted_stars], data)
Beispiel #15
0
    def initialize_object(cls,
                          input_samples,
                          Nsamples=1000,
                          twixie_flag=False):
        """
                Read low latency posterior_samples
                """
        names = ['weight', 'm1', 'm2', 'spin1', 'spin2', 'dist_mbta']
        data_out = Table(input_samples, names=names)

        data_out['mchirp'], data_out['eta'], data_out[
            'q'] = lightcurve_utils.ms2mc(data_out['m1'], data_out['m2'])

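        # effective aligned spin: mass-weighted average of the component spins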
        data_out['chi_eff'] = ((data_out['m1'] * data_out['spin1'] +
                                data_out['m2'] * data_out['spin2']) /
                               (data_out['m1'] + data_out['m2']))

        # modify 'weight' using twixie information
        if (twixie_flag):
            twixie_file = "/home/reed.essick/mass-dip/production/O1O2-ALL_BandpassPowerLaw-MassDistBeta/twixie-sample-emcee_O1O2-ALL_MassDistBandpassPowerLaw1D-MassDistBeta2D_CLEAN.hdf5"
            (data_twixie, logprob_twixie,
             params_twixie), (massDist1D_twixie, massDist2D_twixie), (
                 ranges_twixie, fixed_twixie), (
                     posteriors_twixie,
                     injections_twixie) = backends.load_emcee_samples(
                         twixie_file, backends.DEFAULT_EMCEE_NAME)
            nstp_twixie, nwlk_twixie, ndim_twixie = data_twixie.shape
            num_1D_params_twixie = len(
                distributions.KNOWN_MassDist1D[massDist1D_twixie]._params)
            mass_model_twixie = distributions.KNOWN_MassDist1D[
                massDist1D_twixie](
                    *data_twixie[0, 0, :num_1D_params_twixie]
                )  ### assumes 1D model params always come first, which should be OK
            mass_model_twixie = distributions.KNOWN_MassDist2D[
                massDist2D_twixie](mass_model_twixie,
                                   *data_twixie[0, 0, num_1D_params_twixie:])
            min_mass_twixie, max_mass_twixie = 1.0, 100.0
            m_grid_twixie = np.linspace(min_mass_twixie, max_mass_twixie, 100)
            ans_twixie = utils.qdist(data_twixie,
                                     mass_model_twixie,
                                     m_grid_twixie,
                                     np.median(data_out['q']),
                                     num_points=100)
            ans_twixie = np.array([list(item) for item in ans_twixie])
            twixie_func = interpolate.interp1d(ans_twixie[:, 0], ans_twixie[:,
                                                                            1])
            data_out['weight'] = data_out['weight'] * twixie_func(
                data_out['q'])

        data_out['weight'] = data_out['weight'] / np.max(data_out['weight'])
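        # Fit a Gaussian process to the normalized weights as a function of
        # (mchirp, q, chi_eff, dist_mbta), giving a smooth weight surface
        # that can be evaluated at arbitrary points below.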
        kernel = 1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1)
        gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=0)
        params = np.vstack((data_out['mchirp'], data_out['q'],
                            data_out['chi_eff'], data_out['dist_mbta'])).T
        data = np.array(data_out['weight'])
        gp.fit(params, data)

        mchirp_min, mchirp_max = np.min(data_out['mchirp']), np.max(
            data_out['mchirp'])
        q_min, q_max = np.min(data_out['q']), np.max(data_out['q'])
        chi_min, chi_max = np.min(data_out['chi_eff']), np.max(
            data_out['chi_eff'])
        dist_mbta_min, dist_mbta_max = np.min(data_out['dist_mbta']), np.max(
            data_out['dist_mbta'])

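        # Rejection sampling: draw uniformly within the bounding box of the
        # posterior and accept each draw with probability equal to its
        # GP-predicted weight.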
        cnt = 0
        samples = []
        while cnt < Nsamples:
            mchirp = np.random.uniform(mchirp_min, mchirp_max)
            q = np.random.uniform(q_min, q_max)
            chi_eff = np.random.uniform(chi_min, chi_max)
            dist_mbta = np.random.uniform(dist_mbta_min, dist_mbta_max)
            samp = np.atleast_2d(np.array([mchirp, q, chi_eff, dist_mbta]))
            weight = gp.predict(samp)[0]
            thresh = np.random.uniform(0, 1)
            if weight > thresh:
                samples.append([mchirp, q, chi_eff, dist_mbta])
                cnt = cnt + 1
        samples = np.array(samples)
        data_out = Table(data=samples,
                         names=['mchirp', 'q', 'chi_eff', 'dist_mbta'])
        data_out["eta"] = lightcurve_utils.q2eta(data_out["q"])
        data_out["m1"], data_out["m2"] = lightcurve_utils.mc2ms(
            data_out["mchirp"], data_out["eta"])
        data_out["q"] = 1.0 / data_out["q"]

        return KNTable(data_out)
    "mass_to_light_r" : mass_to_light["UGRIZ_K0"][:,2],
    "mass_to_light_r_z0P1" : mass_to_light["UGRIZ_K0P1"][:,2],
    "mass_to_light_i" : mass_to_light["UGRIZ_K0"][:,3],
    "mass_to_light_i_z0P1" : mass_to_light["UGRIZ_K0P1"][:,3],
    "mass_to_light_z" : mass_to_light["UGRIZ_K0"][:,4],
    "mass_to_light_z_z0P1" : mass_to_light["UGRIZ_K0P1"][:,4],
    "mass_to_light_U" : mass_to_light["UBVRI_K0"][:,0],
    "mass_to_light_U_z0P1" : mass_to_light["UBVRI_K0P1"][:,0],
    "mass_to_light_B" : mass_to_light["UBVRI_K0"][:,1],
    "mass_to_light_B_z0P1" : mass_to_light["UBVRI_K0P1"][:,1],
    "mass_to_light_V" : mass_to_light["UBVRI_K0"][:,2],
    "mass_to_light_V_z0P1" : mass_to_light["UBVRI_K0P1"][:,2],
    "mass_to_light_R" : mass_to_light["UBVRI_K0"][:,3],
    "mass_to_light_R_z0P1" : mass_to_light["UBVRI_K0P1"][:,3],
    "mass_to_light_I" : mass_to_light["UBVRI_K0"][:,4],
    "mass_to_light_I_z0P1" : mass_to_light["UBVRI_K0P1"][:,4],
}
sdss_catalog = Table(sdss)
sdss_catalog_df = pd.DataFrame(sdss)
sdss_catalog_df.to_pickle("SDSS_matched_catalog")
#prop_sdss = make_prop_array(kcorrect,mass_to_light,np.size(vollim))
#np.save("prop_sdss", prop_sdss)

#import matplotlib.pyplot as plt
#plt.hist(dr8["AB_EXP"][:,2],bins=100,range=(0,1),alpha=0.5,density=True,label="SDSS b/a")
#plt.hist(dr8[vollim]["AB_EXP"][:,2],bins=100,range=(0,1),alpha=0.5,density=True,label="Vollim b/a")
#plt.xlabel("b/a")
#plt.legend()
#plt.show()
 
Beispiel #17
0
def test_compute_differential_flux_points(x_method, y_method):
    """Iterates through the 6 different combinations of input options.

    Tests against analytical result or result from gammapy.spectrum.powerlaw.
    """
    # Define the test cases for all possible options
    energy_min = np.array([1.0, 10.0])
    energy_max = np.array([10.0, 100.0])
    spectral_index = 2.0
    table = Table()
    table['ENERGY_MIN'] = energy_min
    table['ENERGY_MAX'] = energy_max
    table['ENERGY'] = np.array([2.0, 20.0])
    if x_method == 'log_center':
        energy = np.sqrt(energy_min * energy_max)
    elif x_method == 'table':
        energy = table['ENERGY'].data

    # Arbitrary model (simple exponential case)
    def diff_flux_model(x):
        return np.exp(x)

    # Integral of model
    def int_flux_model(E_min, E_max):
        return np.exp(E_max) - np.exp(E_min)

    if y_method == 'power_law':
        if x_method == 'lafferty':
            energy = _energy_lafferty_power_law(energy_min, energy_max,
                                                spectral_index)
            # Test that this is equal to analytically expected
            # log center result
            desired_energy = np.sqrt(energy_min * energy_max)
            assert_allclose(energy, desired_energy, rtol=1e-6)
        desired = power_law_evaluate(energy, 1, spectral_index, energy)
        int_flux = power_law_integral_flux(desired, spectral_index, energy,
                                           energy_min, energy_max)
    elif y_method == 'model':
        if x_method == 'lafferty':
            energy = _x_lafferty(energy_min, energy_max, diff_flux_model)
        desired = diff_flux_model(energy)
        int_flux = int_flux_model(energy_min, energy_max)
    int_flux_err = 0.1 * int_flux
    table['INT_FLUX'] = int_flux
    table['INT_FLUX_ERR_HI'] = int_flux_err
    table['INT_FLUX_ERR_LO'] = -int_flux_err

    result_table = compute_differential_flux_points(x_method, y_method, table,
                                                    diff_flux_model,
                                                    spectral_index)
    # Test energy
    actual_energy = result_table['ENERGY'].data
    desired_energy = energy
    assert_allclose(actual_energy, desired_energy, rtol=1e-3)
    # Test flux
    actual = result_table['DIFF_FLUX'].data
    assert_allclose(actual, desired, rtol=1e-2)
    # Test error
    actual = result_table['DIFF_FLUX_ERR_HI'].data
    desired = 0.1 * result_table['DIFF_FLUX'].data
    assert_allclose(actual, desired, rtol=1e-3)
Beispiel #18
0
    def build_table(self):
        """
        Create a human readable table.

        Returns
        -------
        `astropy.table.QTable`
        """
        keywords = [
            'Start Time', 'End Time', 'Source', 'Instrument', 'Type',
            'Wavelength'
        ]
        record_items = {}
        for key in keywords:
            record_items[key] = []

        def validate_time(time):
            # Handle if the time is None when coming back from VSO
            if time is None:
                return ['None']
            if record.time.start is not None:
                return [parse_time(time).strftime(TIME_FORMAT)]
            else:
                return ['N/A']

        for record in self:
            record_items['Start Time'] += validate_time(record.time.start)
            record_items['End Time'] += validate_time(record.time.end)
            record_items['Source'].append(str(record.source))
            record_items['Instrument'].append(str(record.instrument))
            if hasattr(record, 'extent') and record.extent is not None:
                record_items['Type'].append(
                    str(record.extent.type) if record.extent.
                    type is not None else ['N/A'])
            else:
                record_items['Type'].append('N/A')
            # If we have a start and end Wavelength, make a quantity
            if hasattr(record,
                       'wave') and record.wave.wavemin and record.wave.wavemax:
                unit = record.wave.waveunit
                # Convert this so astropy units parses it correctly
                if unit == "kev":
                    unit = "keV"
                record_items['Wavelength'].append(
                    u.Quantity([
                        float(record.wave.wavemin),
                        float(record.wave.wavemax)
                    ],
                               unit=unit))
            # If not, save None
            else:
                record_items['Wavelength'].append(None)
        # If we have no wavelengths for the whole list, drop the col
        if all([a is None for a in record_items['Wavelength']]):
            record_items.pop('Wavelength')
            keywords.remove('Wavelength')
        else:
            # Make whole column a quantity
            try:
                with u.set_enabled_equivalencies(u.spectral()):
                    record_items['Wavelength'] = u.Quantity(
                        record_items['Wavelength'])
            # If we have mixed units or some Nones just represent as strings
            except (u.UnitConversionError, TypeError):
                record_items['Wavelength'] = [
                    str(a) for a in record_items['Wavelength']
                ]

        return Table(record_items)[keywords]
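
# Usage sketch (editor's note, hypothetical): assuming `results` is an
# instance of the query-response class this method belongs to:
#
#     tbl = results.build_table()
#     print(tbl)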
Beispiel #19
0
def make_sensitivity_curves(tint=1200, spec_res=100, sq_aper_diam=0.30, seeing_limited=False):
    mag = np.arange(10, 22)
    snr_z = np.zeros(len(mag), dtype=float)
    snr_y = np.zeros(len(mag), dtype=float)
    snr_j = np.zeros(len(mag), dtype=float)
    snr_h = np.zeros(len(mag), dtype=float)
    snr_sum_z = np.zeros(len(mag), dtype=float)
    snr_sum_y = np.zeros(len(mag), dtype=float)
    snr_sum_j = np.zeros(len(mag), dtype=float)
    snr_sum_h = np.zeros(len(mag), dtype=float)
    bkg_z = np.zeros(len(mag), dtype=float)
    bkg_y = np.zeros(len(mag), dtype=float)
    bkg_j = np.zeros(len(mag), dtype=float)
    bkg_h = np.zeros(len(mag), dtype=float)
    star_z = np.zeros(len(mag), dtype=float)
    star_y = np.zeros(len(mag), dtype=float)
    star_j = np.zeros(len(mag), dtype=float)
    star_h = np.zeros(len(mag), dtype=float)

    spec_z_tab = None
    spec_y_tab = None
    spec_j_tab = None
    spec_h_tab = None

    # Calculate the number of supernovae.
    N_SNe = 4500.0 * 0.6 * 10**(mag - 18.9)

    out_file = 'roboAO_sensitivity_t{0:d}_R{1:d}_ap{2:0.3f}'.format(tint, spec_res,
                                                                    sq_aper_diam)
    
    # Save the output to a table.
    _out = open(out_file + '.txt', 'w')

    meta1 = '# tint = {0:5d}, R = {1:5d}, sq_ap_diam = {2:5.3f}"\n'
    _out.write(meta1.format(tint, spec_res, sq_aper_diam))
    _out.write('# Sensitivity integrated over broad band.\n')
    
    hdr = '{0:5s}  {1:6s}  {2:5s}  {3:5s}  {4:5s}  {5:5s}  {6:5s}  {7:5s}\n'
    fmt = '{0:5.1f}  {1:6.1f}  {2:5.1f}  {3:5.1f}  {4:5.1f}  {5:5.1f}  {6:5.1f}  {7:5.1f}\n'
    _out.write(hdr.format('# Mag', 'N_SNe', 'J_SNR', 'H_SNR', 'J_ms', 'H_ms', 'J_mb', 'H_mb'))
               
    for mm in range(len(mag)):
        print('Mag: ', mag[mm])
        blah_z = etc_uh_roboAO(mag[mm], 'Z', tint,
                               spec_res=spec_res, sq_aper_diam=sq_aper_diam, 
                               seeing_limited=seeing_limited)
        blah_y = etc_uh_roboAO(mag[mm], 'Y', tint,
                               spec_res=spec_res, sq_aper_diam=sq_aper_diam, 
                               seeing_limited=seeing_limited)
        blah_j = etc_uh_roboAO(mag[mm], 'J', tint,
                               spec_res=spec_res, sq_aper_diam=sq_aper_diam,
                               seeing_limited=seeing_limited)
        blah_h = etc_uh_roboAO(mag[mm], 'H', tint,
                               spec_res=spec_res, sq_aper_diam=sq_aper_diam,
                               seeing_limited=seeing_limited)
        
        col_z_suffix = '_Z_{0:d}'.format(mag[mm])
        col_y_suffix = '_Y_{0:d}'.format(mag[mm])
        col_j_suffix = '_J_{0:d}'.format(mag[mm])
        col_h_suffix = '_H_{0:d}'.format(mag[mm])

        spec_signal_z = Column(name='sig'+col_z_suffix, data=blah_z[4])
        spec_signal_y = Column(name='sig'+col_y_suffix, data=blah_y[4])
        spec_signal_j = Column(name='sig'+col_j_suffix, data=blah_j[4])
        spec_signal_h = Column(name='sig'+col_h_suffix, data=blah_h[4])
        spec_bkg_z = Column(name='bkg'+col_z_suffix, data=blah_z[5])
        spec_bkg_y = Column(name='bkg'+col_y_suffix, data=blah_y[5])
        spec_bkg_j = Column(name='bkg'+col_j_suffix, data=blah_j[5])
        spec_bkg_h = Column(name='bkg'+col_h_suffix, data=blah_h[5])
        spec_snr_z = Column(name='snr'+col_z_suffix, data=blah_z[6])
        spec_snr_y = Column(name='snr'+col_y_suffix, data=blah_y[6])
        spec_snr_j = Column(name='snr'+col_j_suffix, data=blah_j[6])
        spec_snr_h = Column(name='snr'+col_h_suffix, data=blah_h[6])

        
        if spec_z_tab is None:
            spec_z_tab = Table([blah_z[3]], names=['wave_Z'])
        if spec_y_tab is None:
            spec_y_tab = Table([blah_y[3]], names=['wave_Y'])
        if spec_j_tab is None:
            spec_j_tab = Table([blah_j[3]], names=['wave_J'])
        if spec_h_tab is None:
            spec_h_tab = Table([blah_h[3]], names=['wave_H'])

        spec_z_tab.add_columns([spec_signal_z, spec_bkg_z, spec_snr_z])
        spec_y_tab.add_columns([spec_signal_y, spec_bkg_y, spec_snr_y])
        spec_j_tab.add_columns([spec_signal_j, spec_bkg_j, spec_snr_j])
        spec_h_tab.add_columns([spec_signal_h, spec_bkg_h, spec_snr_h])

        snr_z[mm]  = blah_z[0]
        snr_y[mm]  = blah_y[0]
        snr_j[mm]  = blah_j[0]
        snr_h[mm]  = blah_h[0]
        snr_sum_z[mm] = math.sqrt((spec_snr_z**2).sum())
        snr_sum_y[mm] = math.sqrt((spec_snr_y**2).sum())
        snr_sum_j[mm] = math.sqrt((spec_snr_j**2).sum())
        snr_sum_h[mm] = math.sqrt((spec_snr_h**2).sum())

        star_z[mm]  = blah_z[1]
        star_y[mm]  = blah_y[1]
        star_j[mm]  = blah_j[1]
        star_h[mm]  = blah_h[1]
        bkg_z[mm]  = blah_z[2]
        bkg_y[mm]  = blah_y[2]
        bkg_j[mm]  = blah_j[2]
        bkg_h[mm]  = blah_h[2]

    avg_tab = Table([mag, snr_z, snr_y, snr_j, snr_h, 
                     snr_sum_z, snr_sum_y, snr_sum_j, snr_sum_h,
                     star_z, star_y, star_j, star_h, bkg_z, bkg_y, bkg_j, bkg_h],
                    names=['mag', 'snr_z', 'snr_y', 'snr_j', 'snr_h', 
                           'snr_sum_z', 'snr_sum_y', 'snr_sum_j', 'snr_sum_h',
                           'star_z', 'star_y', 'star_j', 'star_h', 
                           'bkg_z', 'bkg_y', 'bkg_j', 'bkg_h'])


    out_file = 'roboAO_sensitivity_t{0:d}_R{1:d}_ap{2:0.3f}'.format(tint, spec_res, sq_aper_diam)

    if seeing_limited:
        out_file += '_seeing'
    
    # Save the tables
    spec_z_tab.write(out_file + '_spec_z_tab.fits', overwrite=True)
    spec_y_tab.write(out_file + '_spec_y_tab.fits', overwrite=True)
    spec_j_tab.write(out_file + '_spec_j_tab.fits', overwrite=True)
    spec_h_tab.write(out_file + '_spec_h_tab.fits', overwrite=True)
    avg_tab.write(out_file + '_avg_tab.fits', overwrite=True)

    return
Beispiel #20
0
    '22_briggs0': '2-2 Briggs 0',
    '22_natural': '2-2 Natural',
    'H77a': 'H$77\\alpha$ Briggs 0'
}

tbl = Table([
    Column(data=[name_mapping[name] for name in cube_names], name="Cube ID"),
    Column(data=([cubes[name].wcs.wcs.restfrq
                  for name in cube_names] * u.Hz).to(u.GHz),
           name='Frequency'),
    Column(data=[dv[name].to(u.km / u.s).value
                 for name in cube_names] * u.km / u.s,
           name='Channel Width'),
    Column(data=[errors[name].to(u.mJy).value
                 for name in cube_names] * u.mJy / u.beam,
           name='RMS'),
    Column(data=[beams[name].major.to(u.arcsec).value
                 for name in cube_names] * u.arcsec,
           name='BMAJ'),
    Column(data=[beams[name].minor.to(u.arcsec).value
                 for name in cube_names] * u.arcsec,
           name='BMIN'),
    Column(data=[beams[name].pa.to(u.deg).value
                 for name in cube_names] * u.deg,
           name='BPA'),
])
tbl.add_column(
    Column(data=[(1 * u.Jy).to(u.K,
                               u.brightness_temperature(
                                   bm.sr,
                                   freq * u.GHz,
Beispiel #21
0
def inspect_fits(parfile, output='FitInspection.pdf', vlim=[-300, 300]*u.km/u.s, **kwargs):
    '''
    Produce pdf of fitting results for quality check.

    Parameters
    ----------
    parfile : str
        Name of the parameter file in the joebvp format
    output : str,optional
        Name of file to write stackplots output
    vlim : Quantity array, optional
        Velocity range of stackplots
        e.g.: [-400, 400]*u.km/u.s

    Returns
    -------

    '''
    from joebvp import joebvpfit
    from matplotlib.backends.backend_pdf import PdfPages
    llist = LineList('ISM')

    pp=PdfPages(output)

    #import pdb;
    #pdb.set_trace()
    all=abslines_from_VPfile(parfile,linelist=llist,**kwargs) # Instantiate AbsLine objects and make list
    acl=abscomponents_from_abslines(all,vtoler=15.)  # Instantiate AbsComponent objects from this list
    fitpars,fiterrors,parinfo,linecmts = joebvpfit.readpars(parfile)


    fullmodel=makevoigt.cosvoigt(all[0].analy['spec'].wavelength.value,fitpars)

    ### Make a stackplot for each component
    for comp in acl:
        fig=comp.stack_plot(ymnx=(-0.1,1.3),show=False,return_fig=True,vlim=vlim, tight_layout=True)
        if (len(comp._abslines)<6):
            numrow = len(comp._abslines)%6
        else:
            numrow = 6
        height = numrow*1.0+0.5
        fig.set_figheight(height)
        if len(comp._abslines)<6:
            fig.set_figwidth(5.)
        else:
            fig.set_figwidth(10.)
        stackaxes = fig.axes

        for i,ax in enumerate(stackaxes):
            line = comp._abslines[i]
            thesepars=[[line.wrest.value],[line.attrib['logN']],[line.attrib['b'].value],
                       [line.z],[line.attrib['vel'].value],[line.limits.vlim[0].value],[line.limits.vlim[1].value]]
            try:
                thismodel=makevoigt.cosvoigt(line.analy['spec'].wavelength.value,thesepars)
            except Exception:
                ax.text(-100,0.05,'These pixels not used to fit this line')
                continue
            axlin=ax.get_lines()
            veldat=axlin[0].get_data()[0]
            ax.plot(veldat,fullmodel,color='red',alpha=0.8)
            ax.plot(veldat,thismodel,color='purple',linestyle='dashed')
        botbuf = 0.5/height
        fig.subplots_adjust(bottom=botbuf,left=0.1,right=0.95,hspace=0.,wspace=0.35)

        title=comp.name
        fig.suptitle(title)
        fig.savefig(pp,format='pdf')
        plt.close(fig)

    ### Write out full model fits file
    normflux = line.analy['spec'].flux/line.analy['spec'].co
    normsig = line.analy['spec'].sig / line.analy['spec'].co
    modeltab = Table([line.analy['spec'].wavelength.value, fullmodel, normflux, normsig], names=['wavelength', 'model', 'normflux', 'normsig'])
    # modeltab.write(outfile, format='fits', overwrite=True)
    dummycont = np.ones(len(normflux))
    spec = XSpectrum1D.from_tuple((modeltab['wavelength'], modeltab['model'], modeltab['normsig'], dummycont))
    spec.write_to_fits(output[:-4]+'.fits')

    pp.close()
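
# Usage sketch (editor's note, hypothetical file names): build the QC pdf and
# the accompanying full-model FITS spectrum from a joebvp parameter file:
#
#     inspect_fits('component_fits.joebvp', output='FitInspection.pdf')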
Beispiel #22
0
    def query_table(self, query={}, curation={}, selection={}, reorder_columns_rowidx=0,
                    add_coordinates=True, use_qtable=True):
        """
        Get a formatted table of all query results. When multiple results are present for a single value, the best one
        is picked unless the user specifies a selection. This functionality will be revisited in the future.
        The output is as a QTable which allows Quantities to be embedded in the results.

        Parameters
        ----------
        query : dict
            Query to use in MongoDB query language. Default is an empty dictionary for all results.
        curation : dict or str
            Name of a file to use as curation for the data, or a dictionary mapping each
            field to the reference to use for it (otherwise the entry flagged best=1 is picked)
        selection : dict
            Dictionary of overwrites for the supplied curation
        reorder_columns_rowidx : int or None
            Row/entry index to use as a template for column order.  If None, use
            an undefined order (determined by `astropy.Table` initializer).
        add_coordinates : bool or str
            If True, adds a 'coord' column to the table (if it does not
            exist) with a SkyCoord for this table.  If 'raise', raises an
            exception if this fails, otherwise a warning is generated.
        use_qtable : bool
            If True, the result is a QTable, otherwise, a Table

        Returns
        -------
        df : astropy.table.QTable or astropy.table.Table
            Astropy QTable of results
        """

        results = self.query_db(query=query)

        # Load curation file (JSON of best values to use)
        curation_dict = _read_curation(curation)

        # If the user has provided any selection values, overwrite the curation settings
        if selection:
            curation_dict.update(selection)

        # For each entry in result, select best field.value or what the user has specified
        tab_data = []
        for entry in results:
            out_row = {}
            for key, val in entry.items():
                if not isinstance(val, (list, np.ndarray)):
                    out_row[key] = val
                else:
                    # Select best one to return
                    if len(val) > 1:
                        if key in curation_dict.keys():
                            # If selection listed a key, use the reference information there
                            ind = np.array([x['reference'] for x in val]) == curation_dict[key]
                        else:
                            ind = np.array([x.get('best', 0) for x in val]) == 1

                        # Only proceed if you have any results to consider
                        if len(val[ind]) > 0:
                            unit = val[ind][0].get('unit')
                            if val[ind][0].get('distribution') is not None:
                                temp_dic = _get_values_from_distribution(val[ind][0].get('distribution'))
                                temp_val = temp_dic['value']
                            else:
                                temp_val = val[ind][0]['value']
                            out_row[key] = self._store_quantity(temp_val, unit)
                    else:
                        unit = val[0].get('unit')
                        if val[0].get('distribution') is not None:
                            temp_dic = _get_values_from_distribution(val[0].get('distribution'))
                            temp_val = temp_dic['value']
                        else:
                            temp_val = val[0]['value']
                        out_row[key] = self._store_quantity(temp_val, unit)

            tab_data.append(out_row)

        if use_qtable:
            tab = QTable(tab_data)
        else:
            tab = Table(tab_data)

        if add_coordinates:
            if 'coord' in tab.colnames:
                warnings.warn('"coord" column already in database table, '
                              'not adding coordinates automatically')
            else:
                try:
                    coo = SkyCoord.guess_from_table(tab)
                except Exception as e:
                    if add_coordinates == 'raise':
                        raise e
                    else:
                        warnings.warn('Failed to add coordinates - exception '
                                      f'raised in guess_from_table: {e}')
                else:
                    tab['coord'] = coo

        if reorder_columns_rowidx is None and len(tab_data) > 0:
            return tab
        elif len(tab_data) == 0:
            return tab
        else:
            reorder_row_colnames = list(tab_data[reorder_columns_rowidx].keys())
            for colname in tab.colnames:
                if colname not in reorder_row_colnames:
                    reorder_row_colnames.append(colname)
            return tab[reorder_row_colnames]
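
# Usage sketch (editor's note, hypothetical): assuming `db` is an instance of
# this database class, fetch all entries with a parallax field as a QTable
# using MongoDB query syntax:
#
#     tab = db.query_table(query={'parallax': {'$exists': True}})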
Beispiel #23
0
table = Table(
    [
        Tile, Number, RA, DEC, r_auto, g_auto, i_auto, z_auto, uJAVA_auto,
        J0378_auto, J0395_auto, J0410_auto, J0430_auto, J0515_auto, J0660_auto,
        J0861_auto, r_MAG_ISO_GAUSS, g_MAG_ISO_GAUSS, i_MAG_ISO_GAUSS,
        z_MAG_ISO_GAUSS, uJAVA_MAG_ISO_GAUSS, J0378_MAG_ISO_GAUSS,
        J0395_MAG_ISO_GAUSS, J0410_MAG_ISO_GAUSS, J0430_MAG_ISO_GAUSS,
        J0515_MAG_ISO_GAUSS, J0660_MAG_ISO_GAUSS, J0861_MAG_ISO_GAUSS,
        r_MAG_APER_6_0, g_MAG_APER_6_0, i_MAG_APER_6_0, z_MAG_APER_6_0,
        uJAVA_MAG_APER_6_0, J0378_MAG_APER_6_0, J0395_MAG_APER_6_0,
        J0410_MAG_APER_6_0, J0430_MAG_APER_6_0, J0515_MAG_APER_6_0,
        J0660_MAG_APER_6_0, J0861_MAG_APER_6_0, r_auto_err, g_auto_err,
        i_auto_err, z_auto_err, uJAVA_auto_err, J0378_auto_err, J0395_auto_err,
        J0410_auto_err, J0430_auto_err, J0515_auto_err, J0660_auto_err,
        J0861_auto_err, r_MAG_ISO_GAUSS_ERR, g_MAG_ISO_GAUSS_ERR,
        i_MAG_ISO_GAUSS_ERR, z_MAG_ISO_GAUSS_ERR, J0378_MAG_ISO_GAUSS_ERR,
        J0395_MAG_ISO_GAUSS_ERR, J0410_MAG_ISO_GAUSS_ERR,
        J0430_MAG_ISO_GAUSS_ERR, J0515_MAG_ISO_GAUSS_ERR,
        J0660_MAG_ISO_GAUSS_ERR, J0861_MAG_ISO_GAUSS_ERR, r_MAG_APER_6_0_err,
        g_MAG_APER_6_0_err, i_MAG_APER_6_0_err, z_MAG_APER_6_0_err,
        uJAVA_MAG_APER_6_0_err, J0378_MAG_APER_6_0_err, J0395_MAG_APER_6_0_err,
        J0410_MAG_APER_6_0_err, J0430_MAG_APER_6_0_err, J0515_MAG_APER_6_0_err,
        J0660_MAG_APER_6_0_err, J0861_MAG_APER_6_0_err
    ],
    names=('Tile', 'Number', 'RA', 'Dec', 'rSDSS_auto', 'gSDSS_auto',
           'iSDSS_auto', 'zSDSS_auto', 'uJAVA_auto', 'J0378_auto',
           'J0395_auto', 'J0410_auto', 'J0430_auto', 'J0515_auto',
           'J0660_auto', 'J0861_auto', 'rSDSS_ISO_GAUSS', 'gSDSS_ISO_GAUSS',
           'iSDSS_ISO_GAUSS', 'zSDSS_ISO_GAUSS', 'uJAVA_ISO_GAUSS',
           'J0378_ISO_GAUSS', 'J0395_ISO_GAUSS', 'J0410_ISO_GAUSS',
           'J0430_ISO_GAUSS', 'J0515_ISO_GAUSS', 'J0660_ISO_GAUSS',
           'J0861_ISO_GAUSS', 'rSDSS_MAG_APER_6_0', 'gSDSS_MAG_APER_6_0',
           'iSDSS_MAG_APER_6_0', 'zSDSS_MAG_APER_6_0', 'uJAVA_MAG_APER_6_0',
           'J0378_MAG_APER_6_0', 'J0395_MAG_APER_6_0', 'J0410_MAG_APER_6_0',
           'J0430_MAG_APER_6_0', 'J0515_MAG_APER_6_0', 'J0660_MAG_APER_6_0',
           'J0861_MAG_APER_6_0', 'rSDSS_auto_err', 'gSDSS_auto_err',
           'iSDSS_auto_err', 'zSDSS_auto_err', 'uJAVA_auto_err',
           'J0378_auto_err', 'J0395_auto_err', 'J0410_auto_err',
           'J0430_auto_err', 'J0515_auto_err', 'J0660_auto_err',
           'J0861_auto_err', 'rSDSS_ISO_GAUSS_err', 'gSDSS_ISO_GAUSS_err',
           'iSDSS_ISO_GAUSS_err', 'zSDSS_ISO_GAUSS_err', 'J0378_ISO_GAUSS_err',
           'J0395_ISO_GAUSS_err', 'J0410_ISO_GAUSS_err', 'J0430_ISO_GAUSS_err',
           'J0515_ISO_GAUSS_err', 'J0660_ISO_GAUSS_err', 'J0861_ISO_GAUSS_err',
           'rSDSS_MAG_APER_6_0_err', 'gSDSS_MAG_APER_6_0_err',
           'iSDSS_MAG_APER_6_0_err', 'zSDSS_MAG_APER_6_0_err',
           'uJAVA_MAG_APER_6_0_err', 'J0378_MAG_APER_6_0_err',
           'J0395_MAG_APER_6_0_err', 'J0410_MAG_APER_6_0_err',
           'J0430_MAG_APER_6_0_err', 'J0515_MAG_APER_6_0_err',
           'J0660_MAG_APER_6_0_err', 'J0861_MAG_APER_6_0_err'),
    meta={'name': 'first table'})
Beispiel #24
0
    yref=src.galactic.b.value,
    coordsys='GAL',
    proj='TAN',
)

print(ref_image)

#list of detected sources

src_names = ["LMC N132D", "30 Dor C", "LHA 120-N 157B", "LMC P3"]
l = [280.31, 279.60, 279.55, 277.73] * u.deg
b = [-32.78, -31.91, -31.75, -32.09] * u.deg
radius = [0.2, 0.2, 0.5, 0.2] * u.deg

srctab = Table([src_names, l, b, radius],
               names=("src_names", "longitude", "latitude", "radius"),
               meta={'name': 'known source'})

off_regions = []

for s in srctab:
    #circle=CircleSkyRegion(center=SkyCoord(s['longitude'], s['latitude'], unit='deg', frame='galactic'), radius=0.2*u.deg)
    circle = CircleSkyRegion(center=SkyCoord(s['longitude'],
                                             s['latitude'],
                                             unit='deg',
                                             frame='galactic'),
                             radius=s['radius'] * u.deg)
    off_regions.append(circle)

exclusion_mask = ref_image.region_mask(off_regions[0])
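# region_mask() is 1 inside the source circle; invert it so the exclusion
# mask is 1 in the area usable for background estimation and 0 on the source.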
exclusion_mask.data = 1 - exclusion_mask.data
Beispiel #25
0
def exptable_to_proctable(input_exptable, obstypes=None):
    """
    Converts an exposure table to a processing table and an unprocessed table. The columns unique to a processing table
    are filled with default values. If comments are made in COMMENTS or HEADERERR, those will be adjusted in the values
    stored in the processing table.

    Args:
        input_exptable, Table. An exposure table. Each row will be converted to a row of an processing table. If
                               comments are made in COMMENTS or HEADERERR, those will be adjusted in the values
                               stored in the processing table.
        obstypes, list or np.array. Optional. A list of exposure OBSTYPE's that should be processed (and therefore
                                              added to the processing table).

    Returns:
        processing_table, Table. The output processing table. Each row corresponds with an exposure that should be
                                 processed.
        unprocessed_table, Table. The output unprocessed table. Each row is an exposure that should not be processed.
    """
    log = get_logger()
    exptable = input_exptable.copy()

    if obstypes is None:
        obstypes = default_exptypes_for_exptable()

    ## Define the column names for the exposure table and their respective datatypes
    colnames, coldtypes, coldefaults = get_processing_table_column_defs(return_default_values=True)

    # for col in ['COMMENTS']: #'HEADERERR',
    #     if col in exptable.colnames:
    #         for ii, arr in enumerate(exptable[col]):
    #             for item in arr:
    #                 clean_item = item.strip(' \t')
    #                 if len(clean_item) > 6:
    #                     keyval = None
    #                     for symb in [':', '=']:
    #                         if symb in clean_item:
    #                             keyval = [val.strip(' ') for val in clean_item.split(symb)]
    #                             break
    #                     if keyval is not None and len(keyval) == 2 and keyval[0].upper() in exptable.colnames:
    #                         key, newval = keyval[0].upper(), keyval[1]
    #                         expid, oldval = exptable['EXPID'][ii], exptable[key][ii]
    #                         log.info(
    #                             f'Found a requested correction to ExpID {expid}: Changing {key} val from {oldval} to {newval}')
    #                         exptable[key][ii] = newval

    good_exps = (exptable['EXPFLAG'] == 0)
    good_types = np.array([val in obstypes for val in exptable['OBSTYPE']]).astype(bool)
    good = (good_exps & good_types)
    good_table = exptable[good]
    unprocessed_table = exptable[~good]

    ## Remove columns that aren't relevant to processing, they will be added back in the production tables for
    ## end user viewing
    for col in ['REQRA', 'REQDEC', 'TARGTRA', 'TARGTDEC', 'HEADERERR', 'COMMENTS', 'BADEXP']:
        if col in exptable.colnames:
            good_table.remove_column(col)

    if len(good_table) > 0:
        rows = []
        for erow in good_table:
            prow = erow_to_prow(erow)#, colnames, coldtypes, coldefaults)
            rows.append(prow)
        processing_table = Table(names=colnames, dtype=coldtypes, rows=rows)
    else:
        processing_table = Table(names=colnames, dtype=coldtypes)

    return processing_table, unprocessed_table
Beispiel #26
0
from astropy.io import ascii
import astropy.coordinates as ac
from astropy.table import Table

# first export xlsx to csv
# remove some of header so only one line left above data

# read csv
tab = ascii.read('LWA352_tmp.csv', delimiter=',', data_start=1)

# parse geodetic lon/lat values (WGS84) to geocentric x, y, z
xx = [] 
yy = [] 
zz = [] 
names = [] 
for name, lat, lon in tab[['col2', 'col3', 'col4']]:   # may need to adjust cols to use
    print(name, lon, lat) 
    loc = ac.EarthLocation.from_geodetic(lon=lon, lat=lat)   # default uses WGS84
    xx.append(loc.x.value) 
    yy.append(loc.y.value) 
    zz.append(loc.z.value) 
    names.append(name)

# write out WGS84 values
tab = Table([xx, yy, zz, [0]*len(xx), names], names=['x', 'y', 'z', 'diam', 'name']) 
tab.write('LWA352_tmp.cfg', format='ascii')
Beispiel #27
0
    def save_table(self, table_type="csv", path=None, save_name=None):
        '''

        The results of the algorithm are saved as a csv after converting the data into a pandas dataframe.

        Parameters
        ----------

        table_type : str, optional
               Sets the output type of the table. "csv" uses the pandas package.
               "fits" uses astropy to output a FITS table.

        path : str, optional
               The path where the file should be saved.
        save_name : str, optional
                    The prefix for the saved file. If None, the name from the header is used.

        Returns
        -------

        self.dataframe : pandas dataframe
                         The dataframe is returned for use with the Analysis class.

        '''

        if save_name is None:
            save_name = self.header["OBJECT"]


        if not path:
          if table_type=="csv":
            filename = "".join([save_name,"_table",".csv"])
          elif table_type=="fits":
            filename = "".join([save_name,"_table",".fits"])

        else:
            if path[-1] != "/":
                path = "".join(path,"/")
            if table_type=="csv":
              filename = "".join([save_name,"_table",".csv"])
            elif table_type=="fits":
              filename = "".join([save_name,"_table",".fits"])

        data = {"Lengths" : self.lengths, \
                "Menger Curvature" : self.menger_curvature,\
                "Plane Orientation (RHT)" : self.rht_curvature["Mean"],\
                "RHT Curvature" : self.rht_curvature["Std"],\
                # "Estimated Width" : self.widths["Estimated Width"], \
                "Branches" : self.branch_info["filament_branches"], \
                "Branch Lengths" : self.branch_info["branch_lengths"]}

        for i, param in enumerate(self.width_fits["Names"]):
          data[param] = self.width_fits["Parameters"][:,i]
          data[param+" Error"] = self.width_fits["Errors"][:,i]

        if table_type=="csv":
          from pandas import DataFrame, Series

          for key in data.keys():
            data[key] = Series(data[key])

          df = DataFrame(data)
          df.to_csv(filename)

        elif table_type=="fits":
          from astropy.table import Table

          # Branch Lengths contains a list for each entry, which aren't accepted for BIN tables.
          if "Branch Lengths" in data.keys():
            del data["Branch Lengths"]

          df = Table(data)

          df.write(filename)

        else:
          raise NameError("Only formats supported are 'csv' and 'fits'.")


        self.dataframe = df

        return self
Beispiel #28
0
    def read_samples(cls, filename_samples):
        """
        Read LALinference posterior_samples
        """
        import os
        if not os.path.isfile(filename_samples):
            raise ValueError("Sample file supplied does not exist")

        if "hdf" in filename_samples:
            samples_out = h5py.File(filename_samples, 'r')
            samples_out = samples_out['lalinference']

            data_out = Table(samples_out)
            data_out['q'] = data_out['m1'] / data_out['m2']
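            # chirp mass: M_c = (m1*m2)**(3/5) / (m1 + m2)**(1/5)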
            data_out['mchirp'] = (data_out['m1'] * data_out['m2'])**(
                3. / 5.) / (data_out['m1'] + data_out['m2'])**(1. / 5.)

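            # fold the inclination angle onto [0, 90] degrees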
            data_out['theta'] = data_out['iota']
            idx = np.where(data_out['theta'] > 90.)[0]
            data_out['theta'][idx] = 180 - data_out['theta'][idx]

            data_out["eta"] = lightcurve_utils.q2eta(data_out["q"])
            data_out["m1"], data_out["m2"] = lightcurve_utils.mc2ms(
                data_out["mchirp"], data_out["eta"])
            data_out['q'] = 1.0 / data_out['q']

        else:
            data_out = Table.read(filename_samples, format='ascii')

            if 'mass_1_source' in list(data_out.columns):
                data_out['m1'] = data_out['mass_1_source']
                print('setting m1 to m1_source')
            if 'mass_2_source' in list(data_out.columns):
                data_out['m2'] = data_out['mass_2_source']
                print('setting m2 to m2_source')

            if 'm1_detector_frame_Msun' in list(data_out.columns):
                data_out['m1'] = data_out['m1_detector_frame_Msun']
                print('setting m1 to m1_detector_frame_Msun')
            if 'm2_detector_frame_Msun' in list(data_out.columns):
                data_out['m2'] = data_out['m2_detector_frame_Msun']
                print('setting m2 to m2_detector_frame_Msun')

            if 'dlam_tilde' in list(data_out.columns):
                data_out['dlambdat'] = data_out['dlam_tilde']
                print('setting dlambdat to dlam_tilde')
            if 'lam_tilde' in list(data_out.columns):
                data_out['lambdat'] = data_out['lam_tilde']
                print('setting lambdat to lam_tilde')

            if 'delta_lambda_tilde' in list(data_out.columns):
                data_out['dlambdat'] = data_out['delta_lambda_tilde']
                print('setting dlambdat to delta_lambda_tilde')
            if 'lambda_tilde' in list(data_out.columns):
                data_out['lambdat'] = data_out['lambda_tilde']
                print('setting lambdat to lambda_tilde')

            if 'm1' not in list(data_out.columns):
                eta = lightcurve_utils.q2eta(data_out['mass_ratio'])
                m1, m2 = lightcurve_utils.mc2ms(data_out["chirp_mass"], eta)
                data_out['m1'] = m1
                data_out['m2'] = m2

            data_out['mchirp'], data_out['eta'], data_out[
                'q'] = lightcurve_utils.ms2mc(data_out['m1'], data_out['m2'])
            data_out['q'] = 1.0 / data_out['q']
            data_out['chi_eff'] = ((data_out['m1'] * data_out['spin1'] +
                                    data_out['m2'] * data_out['spin2']) /
                                   (data_out['m1'] + data_out['m2']))
            data_out["dist"] = data_out["luminosity_distance_Mpc"]

        return KNTable(data_out)
Beispiel #29
0
def tabulate_fit_results(data_iter,
                         band_names,
                         lambda_eff,
                         fit_func,
                         fitting_method='band',
                         config=None,
                         out_path=None):
    """Tabulate fit results for a collection of data tables

    Results already written to out_path are skipped.

    Args:
        data_iter  (iter): Iterable of photometric data for different SN
        band_names (list): Name of bands included in ``data_iter``
        lambda_eff (list): Effective wavelength for bands in ``band_names``
        fit_func   (func): Function to use to run fits
        fitting_method (str): Name of the fitting approach to use (default 'band')
        config     (dict): Specifies priors / kwargs for fitting each model
        out_path    (str): Optionally cache results to file

    Returns:
       An astropy table with fit results
    """

    # Set default kwargs
    config = deepcopy(config) or dict()
    fitting_func = _get_fitting_func(fitting_method, band_names, lambda_eff)

    # Add meta_data to output table meta data
    if out_path and Path(out_path).exists():
        out_table = Table.read(out_path)

    else:
        out_table = Table(names=['obj_id', 'message'], dtype=['U20', 'U10000'])

    out_table.meta['band_names'] = band_names
    out_table.meta['lambda_eff'] = lambda_eff
    out_table.meta['fit_func'] = fit_func.__name__
    out_table.meta['out_path'] = str(out_path)

    for data in data_iter:
        # Get fitting priors and kwargs
        obj_id = data.meta['obj_id']
        if obj_id in out_table['obj_id']:
            continue

        salt2_prior, salt2_kwargs, sn91bg_prior, sn91bg_kwargs = \
            utils.parse_config_dict(obj_id, config)

        try:
            fit_results = fitting_func(obj_id=obj_id,
                                       data=data,
                                       fit_func=fit_func,
                                       priors_hs=salt2_prior,
                                       priors_bg=sn91bg_prior,
                                       kwargs_hs=salt2_kwargs,
                                       kwargs_bg=sn91bg_kwargs)

            out_table = vstack([out_table, fit_results])

        except KeyboardInterrupt:
            raise

        except Exception as e:
            out_table.add_row({
                'obj_id': obj_id,
                'message': str(e).replace('\n', '')
            })

        if out_path:
            out_table.write(out_path)

    return out_table
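
# Usage sketch (editor's note, hypothetical names and values): assuming an
# iterable of photometric tables and a fitting function are in scope:
#
#     results = tabulate_fit_results(
#         data_iter, band_names=['g', 'r'], lambda_eff=[4770., 6231.],
#         fit_func=my_fit_func, out_path='fit_results.ecsv')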
Beispiel #30
0
	newfile = open('{0}_position.txt'.format(options.obsid),'w')

	#################################################################################
	# Setting initial parameters
	#################################################################################

	# Opening the metafits file:
	metadata = fits.open('{0}.metafits'.format(options.obsid))[0].header

	# Opening the model file:
	header = fits.getheader("{0}-sources_comp.fits".format(options.obsid))
	data = fits.getdata("{0}-sources_comp.fits".format(options.obsid))

	# loading in the table data.
	t = Table(data)

	# Initialising the RA, DEC and apparent flux vectors.
	RA = np.array(t['ra'])
	DEC = np.array(t['dec'])
	App_int_flux = np.array(t['int_flux'])
	err_App_int_flux = np.array(t['err_int_flux'])

	# Initialising the user-specified pixel scale.
	pix_scale = float(options.scale)
	thresh_cond = 0.95

	# RA and DEC of the pointing centre.
	PC_RA = metadata['RA']
	PC_DEC = metadata['DEC']