Code Example #1
File: sky.py Project: neXyon/pynephoscope
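# Assumed imports for this method excerpt (not shown in the original snippet):
# import ephem; import numpy as np; import astropy.units as u
# from astropy.coordinates import SkyCoord, AltAz, Latitude, Longitude
# from astropy.table import Column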
	def calculate(self):
		ephem_location = ephem.Observer()
		ephem_location.lat = self.location.latitude.to(u.rad) / u.rad
		ephem_location.lon = self.location.longitude.to(u.rad) / u.rad
		ephem_location.elevation = self.location.height / u.meter
		ephem_location.date = ephem.Date(self.time.datetime)

		if self.data is None:
			self.alt = Latitude([], unit=u.deg)
			self.az = Longitude([], unit=u.deg)
			self.names = Column([], dtype=str)  # np.str was removed from NumPy; plain str is equivalent
			self.vmag = Column([])
		else:
			ra = Longitude((self.data['RAh'], self.data['RAm'], self.data['RAs']), u.h)
			dec = Latitude((np.core.defchararray.add(self.data['DE-'], self.data['DEd'].astype(str)).astype(int), self.data['DEm'], self.data['DEs']), u.deg)
			c = SkyCoord(ra, dec, frame='icrs')
			altaz = c.transform_to(AltAz(obstime=self.time, location=self.location))
			self.alt = altaz.alt
			self.az = altaz.az

			self.names = self.data['Name']
			self.vmag = self.data['Vmag']

		for ephemeris in self.ephemerides:
			ephemeris.compute(ephem_location)
			self.vmag = self.vmag.insert(0, ephemeris.mag)
			self.alt = self.alt.insert(0, (ephemeris.alt.znorm * u.rad).to(u.deg))
			self.az = self.az.insert(0, (ephemeris.az * u.rad).to(u.deg))
			self.names = self.names.insert(0, ephemeris.name)
		
		return self.names, self.vmag, self.alt, self.az
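
The loop above works because Column.insert (and the matching method on Latitude/Longitude) returns a new object rather than modifying in place, which is why each result is assigned back. A minimal sketch of that behavior, with made-up values:

from astropy.table import Column

vmag = Column([2.1, 4.8])
vmag2 = vmag.insert(0, -1.5)  # returns a NEW Column; vmag itself is unchanged
print(len(vmag), len(vmag2))  # 2 3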
Code Example #2
File: test_hdf5.py Project: MaxNoe/astropy
def test_preserve_serialized_compatibility_mode(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}

    with catch_warnings() as w:
        t1.write(test_file, path='the_table', serialize_meta=True,
                 overwrite=True, compatibility_mode=True)

    assert str(w[0].message).startswith(
        "compatibility mode for writing is deprecated")

    t2 = Table.read(test_file, path='the_table')

    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta
Code Example #3
	def _end_column(self, name, attrs, content):
		# Parser callback: wrap the accumulated column attributes in an
		# astropy Column and stash VOTable-style info in its meta dict.
		column = Column(
			name=self.curColumn, dtype=self.curDtype,
			description=self.curDescription,
			unit=getattr(self, "curUnit", None))
		column.meta["ucd"] = self.curUcd
		column.meta["datatype"] = self.curDatatype
		column.meta["arraysize"] = self.curArraysize
		self.curTable[self.curColumn] = column
		self.inColumn = False
Code Example #4
File: observation.py Project: JouvinLea/gammapy
    def to_column(self):
        """Convert to astropy column.

        Returns
        -------
        col : `~astropy.table.Column`
            Column with the axis info.
         """
        col = Column(data=self.bins, name=self.name)
        col.meta['axis_format'] = self.format
        return col
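
A sketch of what the returned column carries, using a hypothetical axis (the names and values are illustrative, not from gammapy):

from astropy.table import Column

bins = [0.1, 0.3, 1.0]             # hypothetical axis bin values
col = Column(data=bins, name='energy')
col.meta['axis_format'] = 'edges'  # extra axis info rides along in Column.meta
print(col.meta)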
Code Example #5
File: test_hdf5.py Project: MaxNoe/astropy
def test_preserve_serialized(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}

    t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)

    t2 = Table.read(test_file, path='the_table')

    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta
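
For reference, the round trip this and the neighboring tests exercise, as a minimal sketch (the file and path names are arbitrary; HDF5 output requires h5py):

from astropy.table import Table, Column

t = Table()
t['a'] = Column([1, 2, 3], unit='s', description='A column')
t.write('example.hdf5', path='data', serialize_meta=True, overwrite=True)
t2 = Table.read('example.hdf5', path='data')
assert t2['a'].unit == t['a'].unit  # unit, format, description and meta survive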
Code Example #6
File: ast.py Project: agroce/cs562w16
import random
from string import ascii_uppercase

from astropy.table import Table, Column


def check_description_unsing_offical_method():
	t = Table()
	# Note: loops indefinitely by design (randomized stress test); the
	# trailing return is never reached.
	while True:
		Random_column_Unit = ''.join(random.choice(ascii_uppercase) for i in range(1))
		Random_column_Name = ''.join(random.choice(ascii_uppercase) for i in range(2))
		Random_description = ''.join(random.choice(ascii_uppercase) for i in range(12))
		random_int = random.randint(0, 9)
		random_int2 = random.randint(0, 9)
		random_int3 = random.randint(0, 9)
		t[Random_column_Name] = Column([random_int, random_int2, random_int3],
		                               unit=Random_column_Unit,
		                               description=Random_description)
		t[Random_column_Name].description = Random_description
		if Random_description == t[Random_column_Name].description:
			print(Random_description, '==', t[Random_column_Name].description)
		else:
			print(Random_description, '!=', t[Random_column_Name].description)

		assert Random_description == t[Random_column_Name].description

		print(t)

		print('Pass test for adding column: ', t[Random_column_Name])
	return
Code Example #7
File: test_hdf5.py Project: MaxNoe/astropy
def test_metadata_very_large(tmpdir):
    """Test that very large datasets work, now!"""
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}
    t1.meta["meta_big"] = "0" * (2 ** 16 + 1)
    t1.meta["meta_biggerstill"] = "0" * (2 ** 18)

    t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)

    t2 = Table.read(test_file, path='the_table')

    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta
Code Example #8
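# Assumed imports for this excerpt: numpy as np, plus
# from astropy.table import Table, Column
# from photutils.aperture import aperture_photometry
# (aperture_stats_tbl and compute_phot_error are local helpers not shown.)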
def iraf_style_photometry(phot_apertures,
                          bg_apertures,
                          data,
                          error_array=None,
                          bg_method='mode',
                          epadu=1.0):
    """Computes photometry with PhotUtils apertures, with IRAF formulae

    Parameters
    ----------
    phot_apertures : photutils PixelAperture object (or subclass)
        The photutils aperture object to compute the photometry in,
        i.e. the object returned by CircularAperture.
    bg_apertures : photutils PixelAperture object (or subclass)
        The photutils aperture object to measure the background in,
        i.e. the object returned by CircularAnnulus.
    data : array
        The data for the image to be measured.
    error_array : array, optional
        The array of pixelwise error of the data.  If None, the
        Poisson noise term in the error computation will just be the
        square root of the flux/epadu. If not None, the
        aperture_sum_err column output by aperture_photometry
        (divided by epadu) will be used as the Poisson noise term.
    bg_method: {'mean', 'median', 'mode'}, optional
        The statistic used to calculate the background.
        All measurements are sigma clipped.
        NOTE: From DAOPHOT, mode = 3 * median - 2 * mean.
    epadu: float, optional
        Gain in electrons per adu (only use if image units aren't e-).

    Returns
    -------
    final_tbl : astropy.table.Table
        An astropy Table with the columns X, Y, flux, flux_error, mag,
        and mag_err measurements for each of the sources.

    """

    if bg_method not in ['mean', 'median', 'mode']:
        raise ValueError('Invalid background method, choose either '
                         'mean, median, or mode')

    phot = aperture_photometry(data, phot_apertures, error=error_array)
    bg_phot = aperture_stats_tbl(data, bg_apertures, sigma_clip=True)

    if callable(phot_apertures.area):  # Handle photutils change
        ap_area = phot_apertures.area()
    else:
        ap_area = phot_apertures.area
    bg_method_name = 'aperture_{}'.format(bg_method)

    flux = phot['aperture_sum'] - bg_phot[bg_method_name] * ap_area

    flux_bkg = bg_phot[bg_method_name] * ap_area
    snr = phot['aperture_sum'] / flux_bkg

    # Need to use variance of the sources
    # for Poisson noise term in error computation.
    #
    # This means error needs to be squared.
    # If no error_array error = flux ** .5
    if error_array is not None:
        flux_error = compute_phot_error(phot['aperture_sum_err']**2.0, bg_phot,
                                        bg_method, ap_area, epadu)
    else:
        flux_error = compute_phot_error(flux, bg_phot, bg_method, ap_area,
                                        epadu)

    mag = -2.5 * np.log10(flux)
    mag_err = 1.0857 * flux_error / flux

    # Make the final table
    X, Y = phot_apertures.positions.T

    stacked = np.stack([X, Y, flux, flux_error, mag, mag_err, flux_bkg, snr],
                       axis=1)
    names = [
        'X', 'Y', 'flux', 'flux_error', 'mag', 'mag_error', 'flux_bkg', 'snr'
    ]

    final_tbl = Table(data=stacked, names=names)
    return final_tbl
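
A hedged usage sketch, assuming the local helpers referenced above (aperture_stats_tbl, compute_phot_error) are available; the positions and aperture sizes are made up, and CircularAperture/CircularAnnulus are the photutils objects the docstring refers to:

import numpy as np
from photutils.aperture import CircularAperture, CircularAnnulus

positions = [(120.3, 88.1), (45.0, 201.7)]                 # (x, y) pixel positions
phot_aps = CircularAperture(positions, r=4.0)              # source apertures
bg_aps = CircularAnnulus(positions, r_in=8.0, r_out=12.0)  # sky annuli

image = np.random.poisson(100.0, size=(256, 256)).astype(float)
tbl = iraf_style_photometry(phot_aps, bg_aps, image, bg_method='mode')
print(tbl)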
Code Example #9
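# Assumed imports for this excerpt: pdb, numpy as np, multiprocessing as mp,
# from astropy.io import fits; from astropy.table import Table, Column;
# from scipy.interpolate import interp1d
# (get_model, normalize, solve, solve_mcmc and spectrum are module helpers
# not shown here.)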
def fitmastar(model='test',
              field='mastar-goodspec-v2_7_1-trunk',
              star=None,
              nfit=0,
              order=0,
              threads=8,
              write=True,
              telescope='apo25m',
              pixels=None,
              hmask=False,
              mcmc=False):
    """ Fit observed spectra in an input field, given a model
    """

    # get model and list of stars
    mod = get_model(model)
    nlab = len(mod['label_names'])
    bounds_lo = mod['x_min']
    bounds_hi = mod['x_max']

    # get stars
    stars = fits.open(field + '.fits')[1].data
    if nfit > 0: stars = stars[0:nfit]
    if star is not None:
        j = np.where(stars['MANGAID'] == star)[0]
        stars = stars[j]
    stars = Table(stars)
    stars['EBV'] = -1.

    # load up normalized spectra and uncertainties
    norms = []
    for i, star in enumerate(stars):
        norms.append((star['FLUX'], np.sqrt(1. / star['IVAR']), pixels))

    if threads == 0:
        output = []
        for i in range(len(norms)):
            out = normalize(norms[i])
            output.append(out)
    else:
        print('starting pool: ', len(norms))
        pool = mp.Pool(threads)
        output = pool.map_async(normalize, norms).get()
        pool.close()
        pool.join()

    # set initial guesses
    init = np.zeros([len(stars), nlab])
    bounds_lo = np.zeros([len(stars), nlab])
    bounds_hi = np.zeros([len(stars), nlab])
    j_teff = np.where(
        np.core.defchararray.strip(mod['label_names']) == 'TEFF')[0]
    init[:, j_teff] = 4500.
    j_logg = np.where(
        np.core.defchararray.strip(mod['label_names']) == 'LOGG')[0]
    init[:, j_logg] = 2.0
    j_rot = np.where(
        np.core.defchararray.strip(mod['label_names']) == 'LOG(VSINI)')[0]
    init[:, j_rot] = 1.01
    j_mh = np.where(
        np.core.defchararray.strip(mod['label_names']) == '[M/H]')[0]

    extcorr = fits.open('trunk/goodstars-v2_7_1-gaia-extcorr.fits')[1].data

    # rough color-temp interpolator from isochrone points
    color = [-0.457, -0.153, 0.328, 1.247, 2.172, 3.215]
    logte = [4.4822, 4.1053, 3.8512, 3.678, 3.5557, 3.5246]
    f = interp1d(color, logte, kind='linear')

    specs = []
    pix = np.arange(0, 8575, 1)
    allinit = []
    for i, star in enumerate(stars):
        j = np.where(extcorr['MANGAID'] == star['MANGAID'])[0]
        bprpc = extcorr['BPRPC'][j]
        star['EBV'] = extcorr['EBV'][j]
        if abs(bprpc) < 5:
            bounds_lo[i, :] = mod['x_min']
            bounds_hi[i, :] = mod['x_max']
            teff_est = 10.**f(np.max([np.min([bprpc, color[-1]]), color[0]]))
            init[i, j_teff] = teff_est
            if teff_est > 5000.: init[i, j_rot] = 2.3
            if teff_est > 15000.: bounds_lo[i, j_mh] = -1
            print(i, star['MANGAID'], bprpc, init[i, :], len(stars))
        if hmask:
            bd = np.where(
                (star['WAVE'] > 6563 - 100) & (star['WAVE'] < 6563 + 100)
                | (star['WAVE'] > 4861 - 100) & (star['WAVE'] < 4861 + 100)
                | (star['WAVE'] > 4341 - 100) & (star['WAVE'] < 4341 + 100))[0]
            output[i][1][bd] = 1.e-5
        specs.append((output[i][0], output[i][1], init[i, :],
                      (bounds_lo[i, :], bounds_hi[i, :]), order))

    # do the fits in parallel
    if threads == 0:
        output = []
        for i in range(len(specs)):
            out = solve(specs[i])
            print(i, stars[i])
            print(out.x)
            if out.x[0] > 7000: pdb.set_trace()
            output.append(out)
    else:
        j = np.where(
            np.core.defchararray.strip(mod['label_names']) == 'LOGG')[0]
        for i, spec in enumerate(specs):
            specs[i][2][j] = 1.
            print(specs[i][2])
        print('starting pool: ', len(specs))
        pool = mp.Pool(threads)
        output1 = pool.map_async(solve, specs).get()
        pool.close()
        pool.join()
        print('done pool 1')
        for i, spec in enumerate(specs):
            specs[i][2][j] = 5.
            print(specs[i][2])
        print('starting pool 2: ', len(specs))
        pool = mp.Pool(threads)
        output2 = pool.map_async(solve, specs).get()
        pool.close()
        pool.join()
        print('done pool 2')
        output = []
        for o1, o2 in zip(output1, output2):
            print(o1.fun, o2.fun, o1.x, o2.x)
            if o1.fun < o2.fun: output.append(o1)
            else: output.append(o2)

    if mcmc:
        newspecs = []
        for i, star in enumerate(stars):
            newspecs.append(
                (specs[i][0], specs[i][1], output[i].x,
                 '{:s}-{:d}-{:s}-{:d}'.format(star['MANGAID'], star['PLATE'],
                                              star['IFUDESIGN'], star['MJD'])))

        outmcmc = []
        if threads == 0:
            for i, star in enumerate(stars):
                out = solve_mcmc(newspecs[i])
                outmcmc.append(out)
        else:
            pool = mp.Pool(threads)
            outmcmc = pool.map_async(solve_mcmc, newspecs).get()
            pool.close()
            pool.join()

    # output FITS table
    out = Table()
    out['MANGAID'] = stars['MANGAID']
    out['EBV'] = stars['EBV']
    try:
        out['OBJRA'] = stars['OBJRA']
        out['OBJDEC'] = stars['OBJDEC']
        out['PLATE'] = stars['PLATE']
        out['IFUDESIGN'] = stars['IFUDESIGN']
        out['MJD'] = stars['MJD']
        out['MJDQUAL'] = stars['MJDQUAL']
    except KeyError:  # some input tables lack the plate/IFU bookkeeping columns
        pass
    length = len(out)
    params = np.array([o.x for o in output])
    out.add_column(Column(name='FPARAM', data=params))
    bd = np.any((params >= bounds_hi - 0.01 * (bounds_hi - bounds_lo)) |
                (params <= bounds_lo + 0.01 * (bounds_hi - bounds_lo)),
                axis=1)
    out.add_column(Column(name='VALID', data=(np.logical_not(bd).astype(int))))
    if pixels is None: out['WAVE'] = stars['WAVE']
    else: out['WAVE'] = stars['WAVE'][:, pixels[0]:pixels[1]]
    spec = []
    err = []
    bestfit = []
    chi2 = []
    for i, star in enumerate(stars):
        spec.append(specs[i][0])
        err.append(specs[i][1])
        sfit = spectrum(pix, *params[i])
        bestfit.append(sfit)
        chi2.append(np.nansum((specs[i][0] - sfit)**2 / specs[i][1]**2))
    out.add_column(Column(name='SPEC', data=np.array(spec)))
    out.add_column(Column(name='ERR', data=np.array(err)))
    out.add_column(Column(name='SPEC_BESTFIT', data=np.array(bestfit)))
    out.add_column(Column(name='CHI2', data=np.array(chi2)))
    if write:
        out.write('nn-' + field + '-' + telescope + '.fits',
                  format='fits',
                  overwrite=True)
    return out
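
One detail worth noting from the output-table step above: astropy Column accepts multidimensional data, so per-star vectors (parameter arrays, spectra) are stored one row per star. A minimal sketch:

import numpy as np
from astropy.table import Table, Column

t = Table()
params = np.random.randn(3, 5)                    # 3 stars x 5 labels
t.add_column(Column(name='FPARAM', data=params))  # one 5-vector per row
print(t['FPARAM'].shape)                          # (3, 5)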
Code Example #10
def findUncertainties(thisFilter='r', \
                          nside=64, tMax=730, \
                          dbFil='minion_1016_sqlite.db', \
                          crowdError=0.2, \
                          seeingCol='FWHMeff', \
                          cleanNpz=True, \
                          doPlots=False, \
                          wrapGalacs=True, \
                          selectStrip=True):
#, \
#                          hiRes=True):

    """Catalogs the uncertainties for a given database, returns the
    file path"""

    # doPlots switches on plotting. This makes things quite a bit
    # slower for coarse healpix, and I haven't worked out how to
    # specify the output plot directory yet. Recommend default to
    # False.

    # plot functions
    plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    opsdb = db.OpsimDatabase(dbFil)
    outDir = 'crowding_test_2017-07-25'
    resultsDb = db.ResultsDb(outDir=outDir)

    # slicer, etc.
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    sql = 'filter="%s" and night < %i' % (thisFilter, tMax)
    plotDict={'colorMax':27.}

    # initialise the entire bundle list
    bundleList = []

    # set up for higher-resolution spatial maps DOESN'T WORK ON LAPTOP
    #if hiRes:
    #    mafMap = maps.StellarDensityMap(nside=128)
    #else:
    #    mafMap = maps.StellarDensityMap(nside=64)

    # if passed a single number, turn crowdError into a list
    if not isinstance(crowdError, list):
        crowdVals = [np.copy(crowdError)]
    else:
        crowdVals = crowdError[:]

    # build up the bundle list. Build up a list of crowding values and
    # their column names. HARDCODED for the moment, can make the input list
    # an argument if desired.
    crowdStem = '%sCrowd' % (thisFilter)
    lCrowdCols = []
    # loop through the crowding values
    #crowdVals = [0.2, 0.1, 0.05]
    for errCrowd in crowdVals:  
        crowdName = '%s%.3f' % (crowdStem,errCrowd)
        lCrowdCols.append(crowdName) # to pass later
        metricThis = metrics.CrowdingMetric(crowding_error=errCrowd, \
                                                seeingCol='FWHMeff')
        bundleThis = metricBundles.MetricBundle(metricThis, slicer, sql, \
                                                    plotDict=plotDict, \
                                                    fileRoot=crowdName, \
                                                    runName=crowdName, \
                                                    plotFuncs=plotFuncs)

        bundleList.append(bundleThis)

    #metric = metrics.CrowdingMetric(crowding_error=crowdError, \
    #    seeingCol=seeingCol)
    #bundle = metricBundles.MetricBundle(metric,\
    #                                        slicer,sql, plotDict=plotDict, \
    #                                        plotFuncs=plotFuncs)
    #bundleList.append(bundle)
    
    # ... then the m5col
    metricCoadd = metrics.Coaddm5Metric()
    bundleCoadd = metricBundles.MetricBundle(metricCoadd,\
                                                 slicer,sql,plotDict=plotDict, \
                                                 plotFuncs=plotFuncs)
    bundleList.append(bundleCoadd)
    
    # Let's also pass through some useful statistics
    # per-HEALPIX. We'll want to bring across the output metric names
    # as well so that we can conveniently access them later.
    # some convenient plot functions
    statsCols = ['FWHMeff', 'fiveSigmaDepth', 'airmass']
    metricNames = [ 'MedianMetric', 'RobustRmsMetric', 'MinMetric', 'MaxMetric']
    statsNames = {}
    sKeyTail = '_%s_and_night_lt_%i_HEAL' % (thisFilter, tMax)

    # may as well get good plot dicts too...
    plotDicts = {}
    plotDicts['FWHMeff_MedianMetric'] = {'colorMax':2.}
    plotDicts['fiveSigmaDepth_MedianMetric'] = {'colorMax':26.}
    plotDicts['airmass_MedianMetric'] = {'colorMax':2.5}
    plotDicts['FWHMeff_RobustRmsMetric'] = {'colorMax':1.}
    plotDicts['fiveSigmaDepth_RobustRmsMetric'] = {'colorMax':2.}
    plotDicts['airmass_RobustRmsMetric'] = {'colorMax':1.}

    # initialize the minmax values for the moment
    plotDicts['FWHMeff_MinMetric'] = {'colorMax':3}
    plotDicts['FWHMeff_MaxMetric'] = {'colorMax':3}

    # ensure they all have xMax as well
    for sKey in plotDicts.keys():
        plotDicts[sKey]['xMax'] = plotDicts[sKey]['colorMax']

    for colName in statsCols:
        for metricName in metricNames:

            # lift out the appropriate plotdict
            plotDict = {}
            sDict = '%s_%s' % (colName, metricName)
            if sDict in plotDicts.keys():
                plotDict = plotDicts[sDict]

            thisMetric = getattr(metrics, metricName)
            metricObj = thisMetric(col=colName)
            bundleObj = metricBundles.MetricBundle(metricObj,slicer,sql, \
                                                       plotDict=plotDict, \
                                                       plotFuncs=plotFuncs)

            bundleList.append(bundleObj)

            # construct the output table column name and the key for
            # the bundle object
            tableCol = '%s%s_%s' % (thisFilter, colName, metricName)
            statsNames[tableCol] = 'opsim_%s_%s%s' \
                % (metricName.split('Metric')[0], colName, sKeyTail)
        
            # as a debug, see if this is actually working...
            # print tableCol, statsNames[tableCol], statsNames[tableCol]

    # try the number of visits
    col2Count = 'fiveSigmaDepth'
    metricN = metrics.CountMetric(col=col2Count)
    bundleN = metricBundles.MetricBundle(metricN, slicer, sql, \
                                             plotFuncs=plotFuncs)
    bundleList.append(bundleN)
    countCol = '%sCount' % (thisFilter)
    statsNames[countCol] = 'opsim_Count_%s%s' % (col2Count, sKeyTail)

    # convert to the bundledict...
    bundleDict = metricBundles.makeBundlesDictFromList(bundleList)
    bgroup = metricBundles.MetricBundleGroup(bundleDict, \
                                                 opsdb, outDir=outDir, \
                                                 resultsDb=resultsDb)

    # ... and run...
    bgroup.runAll()

    # ... also plot...
    if doPlots:
        bgroup.plotAll()

    # now produce the table for this run
    nameDepth = 'opsim_CoaddM5_%s_and_night_lt_%i_HEAL' \
        % (thisFilter, tMax)
    nameCrowd = 'opsim_Crowding_To_Precision_%s_and_night_lt_%i_HEAL' \
        % (thisFilter, tMax)

    npix = bgroup.bundleDict[nameDepth].metricValues.size
    nsideFound = hp.npix2nside(npix)
    ra, dec = healpyUtils.hpid2RaDec(nside, np.arange(npix))
    cc = SkyCoord(ra=np.copy(ra), dec=np.copy(dec), frame='fk5', unit='deg')

    # boolean mask for nonzero entries
    bVal = ~bgroup.bundleDict[nameDepth].metricValues.mask

    # Generate the table
    tVals = Table()
    tVals['HEALPIX'] = np.arange(npix)
    tVals['RA'] = cc.ra.degree
    tVals['DE'] = cc.dec.degree
    tVals['l'] = cc.galactic.l.degree
    tVals['b'] = cc.galactic.b.degree

    # wrap Galactics?
    if wrapGalacs:
        bBig = tVals['l'] > 180.
        tVals['l'][bBig] -= 360.

    sCoadd = '%sCoadd' % (thisFilter)
    sCrowd = '%sCrowd' % (thisFilter)

    tVals[sCoadd] = Column(bgroup.bundleDict[nameDepth].metricValues, \
                               dtype='float')

    # REPLACE the single-crowding with the set of columns, like so:
    #tVals[sCrowd] = Column(bgroup.bundleDict[nameCrowd].metricValues, \
    #                           dtype='float')

    for colCrowd in lCrowdCols:
        tVals[colCrowd] = Column(bgroup.bundleDict[colCrowd].metricValues, \
                                     dtype='float', format='%.3f')

    # enforce rounding. Three decimal places ought to be sufficient
    # for most purposes. See if the Table constructor follows this
    # through. (DOESN'T SEEM TO WORK when writing to fits anyway...)
    tVals[sCoadd].format='%.3f'
    #tVals[sCrowd].format='%.2f'  # (may only get reported to 1 d.p. anyway)

    #tVals['%sCrowdBri' % (thisFilter)] = \
    #    np.asarray(tVals[sCrowd] < tVals[sCoadd], 'int')

    # add the mask as a boolean
    tVals['%sGood' % (thisFilter)] = \
        np.asarray(bVal, 'int')

    # now add all the summary statistics for which we asked. Try
    # specifying the datatype
    for sStat in statsNames.keys():
        tVals[sStat] = Column(\
            bgroup.bundleDict[statsNames[sStat]].metricValues, \
                dtype='float')

    tVals[countCol] = Column(bgroup.bundleDict[statsNames[countCol]].metricValues, dtype='int')

    # cut down by mask
    #tVals = tVals[bVal]

    # Set metadata and write to disk. Add comments later.
    tVals.meta['nsideFound'] = nsideFound
    tVals.meta['tMax'] = tMax
    tVals.meta['crowdError'] = crowdVals
    tVals.meta['countedCol'] = col2Count[:]

    # Can select only within strip to cut down on space requirements
    sSel=''
    if selectStrip:
        bMin = -30.
        bMax = +25.
        lMin = -150.
        lMax = 80.
        sSel = '_nrPlane'

        bStrip = (tVals['b'] >= bMin) & \
            (tVals['b'] <= bMax) & \
            (tVals['l'] >= lMin) & \
            (tVals['l'] <= lMax)

        tVals = tVals[bStrip]

        tVals.meta['sel_lMin'] = lMin
        tVals.meta['sel_lMax'] = lMax
        tVals.meta['sel_bMin'] = bMin
        tVals.meta['sel_bMax'] = bMax

    # metadata
    tVals.meta['selectStrip'] = selectStrip

    # generate output path
    pathTab = '%s/table_uncty_%s_%s_nside%i_tmax%i%s.fits' % \
        (outDir, dbFil.split('_sqlite')[0], thisFilter, nside, tMax, sSel)

    # save the table
    tVals.write(pathTab, overwrite=True)

    # give this method the capability to remove the npz file (useful
    # if we want to go to very high spatial resolution for some
    # reason):
    if cleanNpz:
        for pathNp in glob.glob('%s/*.npz' % (outDir)):
            os.remove(pathNp)

    return pathTab
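
The function returns the path of the FITS table it writes; assuming the LSST MAF environment and the opsim database above are available, a follow-up might look like:

from astropy.table import Table

pathTab = findUncertainties(thisFilter='r', nside=32, doPlots=False)
tVals = Table.read(pathTab)
print(tVals.colnames)
print(tVals.meta)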
Code Example #11
# downselect (need bsens=true to avoid duplication - but be wary in case you remove duplicates upstream!)
keep = ((tbl['suffix'] == 'finaliter') &
        (tbl['robust'] == 'r0.0') &
        (~tbl['pbcor']) &
        (tbl['bsens']) &
        (~bad))


wtbl = tbl[keep]


print(len(wtbl))
print(wtbl)

wtbl['selfcaliter'] = Column(data=[int(x[2:]) for x in wtbl['selfcaliter']])
wtbl['bsens_div_cleanest_mad'] = wtbl['mad_bsens'] / wtbl['mad_cleanest']
wtbl['bsens_div_cleanest_max'] = wtbl['max_bsens'] / wtbl['max_cleanest']
wtbl['bsens_mad_div_req'] = wtbl['mad_bsens'] / wtbl['Req_Sens'] * 1e3


cols_to_keep = {'region':'Region',
                'band':'Band',
                #'selfcaliter':'$n_{sc}$',
                #'bmaj':r'$\theta_{maj}$',
                #'bmin':r'$\theta_{min}$',
                #'bpa':'BPA',
                #'Req_Res': r"$\theta_{req}$",
                #'BeamVsReq': r"$\theta_{req}/\theta_{maj}$",
                #'peak/mad': "DR",
                #'peak':'$S_{peak}$',
Code Example #12
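# Assumed imports for this excerpt: copy, pdb, numpy as np,
# matplotlib.pyplot as plt, and local helpers (plots, norm, get_model,
# read, spectrum) not shown here.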
def plot(file='all_noelem',
         model='GKh_300_0',
         raw=True,
         plotspec=False,
         validation=True,
         normalize=False,
         pixels=None,
         teff=[0, 10000],
         logg=[-1, 6],
         mh=[-3, 1],
         am=[-1, 1],
         cm=[-2, 2],
         nm=[-2, 2],
         trim=True,
         ids=False):
    ''' plots to assess quality of a model
    '''
    # load model and set up for use
    NN_coeffs = get_model(model)

    # read spectra and labels, and get indices for training and validation set
    if ids:
        true, labels, iden = read(file,
                                  raw=raw,
                                  label_names=NN_coeffs['label_names'],
                                  trim=trim,
                                  ids=ids)
    else:
        true, labels = read(file,
                            raw=raw,
                            label_names=NN_coeffs['label_names'],
                            trim=trim)
    if normalize:
        print('normalizing...')
        gdspec = []
        n = 0
        for i in range(true.shape[0]):
            print(i, labels[i])
            cont = norm.cont(true[i, :],
                             true[i, :],
                             poly=False,
                             chips=False,
                             medfilt=400)
            true[i, :] /= cont
            if pixels is None:
                gd = np.where(np.isfinite(true[i, :]))[0]
                ntot = len(true[i, :])
            else:
                gd = np.where(np.isfinite(true[i, pixels[0]:pixels[1]]))[0]
                ntot = len(true[i, pixels[0]:pixels[1]])
            if len(gd) == ntot:
                gdspec.append(i)
                n += 1
        print(n, true.shape)
        if pixels is None: true = true[gdspec, :]
        else: true = true[gdspec, pixels[0]:pixels[1]]
        labels = labels[gdspec]
        if ids: iden = iden[gdspec]

    #gd=np.where((labels[:,0]>=teff[0]) & (labels[:,0]<=teff[1]) &
    #            (labels[:,1]>=logg[0]) & (labels[:,1]<=logg[1]) &
    #            (labels[:,2]>=mh[0]) & (labels[:,2]<=mh[1]) &
    #            (labels[:,3]>=am[0]) & (labels[:,3]<=am[1]) &
    #            (labels[:,4]>=cm[0]) & (labels[:,4]<=cm[1])  &
    #            (labels[:,5]>=nm[0]) & (labels[:,5]<=nm[1])
    #           )[0]
    #pdb.set_trace()
    #true = true[gd]
    #labels = labels[gd]

    nfit = NN_coeffs['nfit']
    ind_shuffle = NN_coeffs['ind_shuffle']
    true = true[ind_shuffle]
    labels = labels[ind_shuffle]
    if ids: iden = iden[ind_shuffle]
    if validation:
        true = true[nfit:]
        labels = labels[nfit:]
        if ids: iden = iden[nfit:]
    else:
        true = true[:nfit]
        labels = labels[:nfit]
        if ids: iden = iden[:nfit]

    # loop over the spectra
    if plotspec: plt.figure()
    nn = []
    diff2 = []
    for i, lab in enumerate(labels):
        # calculate model spectrum and accumulate model array
        pix = np.arange(8575)
        spec = spectrum(pix, *lab)
        nn.append(spec)
        tmp = np.sum((spec - true[i, :])**2)
        print(i, tmp, lab)
        diff2.append(tmp)
        if plotspec and tmp > 100:
            plt.clf()
            plt.plot(true[i, :], color='g')
            plt.plot(spec, color='b')
            plt.plot(spec - true[i, :], color='r')
            plt.show()
            pdb.set_trace()
        #n=len(np.where(np.abs(apstar[j]-true[i,j]) > 0.05)[0])
    nn = np.array(nn)
    diff2 = np.array(diff2)
    #fig,ax=plots.multi(2,2,hspace=0.001,wspace=0.001,sharex=True,sharey=True)
    #plots.plotc(ax[0,0],labels[:,0],labels[:,1],labels[:,2],xr=[8000,3000],yr=[6,-1],zr=[-2.5,0.5])
    #plots.plotc(ax[1,0],labels[:,0],labels[:,1],labels[:,3],xr=[8000,3000],yr=[6,-1],zr=[-0.25,0.5])
    #plots.plotc(ax[1,1],labels[:,0],labels[:,1],diff2,xr=[8000,3000],yr=[6,-1],zr=[0,10])
    #ax[1,1].text(0.,0.9,'diff**2',transform=ax[1,1].transAxes)
    fig, ax = plots.multi(1,
                          1,
                          hspace=0.001,
                          wspace=0.001,
                          sharex=True,
                          sharey=True)
    plots.plotc(ax,
                labels[:, 0],
                labels[:, 1],
                diff2,
                xr=[8000, 3000],
                yr=[6, -1],
                zr=[0, 10])
    if ids:
        data = Table()
        data.add_column(Column(name='ID', data=iden))
        data.add_column(Column(name='TEFF', data=labels[:, 0]))
        data.add_column(Column(name='LOGG', data=labels[:, 1]))
        data.add_column(Column(name='MH', data=labels[:, 2]))
        data.add_column(Column(name='AM', data=labels[:, 3]))
        plots._data = data
        plots._id_cols = ['ID', 'TEFF', 'LOGG', 'MH', 'AM']
    plots.event(fig)
    plt.draw()
    key = ' '
    sfig, sax = plots.multi(1, 2, hspace=0.001, sharex=True)
    pdb.set_trace()
    print('entering event loop....')
    while key != 'e' and key != 'E':
        x, y, key, index = plots.mark(fig)
        sax[0].cla()
        sax[0].plot(true[index, :], color='g')
        sax[0].plot(nn[index, :], color='b')
        sax[1].cla()
        sax[1].plot(nn[index, :] / true[index, :], color='g')
        plt.figure(sfig.number)
        plt.draw()

    fig.savefig(file + '_' + model + '.png')

    # histogram of ratio of nn to true
    print("making nn/raw comparison histogram ...")
    # pixels across sample
    fig, ax = plots.multi(2, 2, figsize=(12, 8))
    # percentiles across wavelength
    fig2, ax2 = plots.multi(1, 3, hspace=0.001)
    # in parameter space
    fig3, ax3 = plots.multi(2, 3, hspace=0.001, wspace=0.001)
    for f in [fig, fig2, fig3]:
        if validation: f.suptitle('validation set')
        else: f.suptitle('training set')

    # consider full sample and several bins in Teff and [M/H]
    tbins = [[3000, 8000], [3000, 4000], [4000, 5000], [5000, 6000],
             [3000, 4000], [4000, 5000], [5000, 6000]]
    mhbins = [[-2.5, 1.0], [-0.5, 1.0], [-0.5, 1.0], [-0.5, 1.0], [-2.5, -0.5],
              [-2.5, -0.5], [-2.5, -0.5]]
    names = [
        'all', '3000<Te<4000, M/H>-0.5', '4000<Te<5000, M/H>-0.5',
        '5000<Te<6000, M/H>-0.5', '3000<Te<4000, M/H<-0.5',
        '4000<Te<5000, M/H<-0.5', '5000<Te<6000, M/H<-0.5'
    ]
    colors = ['k', 'r', 'g', 'b', 'c', 'm', 'y']
    lws = [3, 1, 1, 1, 1, 1, 1]

    for tbin, mhbin, name, color, lw in zip(tbins, mhbins, names, colors, lws):
        gd = np.where((labels[:, 0] >= tbin[0]) & (labels[:, 0] <= tbin[1])
                      & (labels[:, 2] >= mhbin[0])
                      & (labels[:, 2] <= mhbin[1]))[0]
        print(tbin, len(gd))
        if len(gd) > 0:
            t1 = nn[gd, :]
            t2 = true[gd, :]

            # differential fractional error of all pixels
            err = (t1 - t2) / t2
            hist, bins = np.histogram(err.flatten(),
                                      bins=np.linspace(-0.2, 0.2, 4001))
            plots.plotl(ax[0, 0],
                        np.linspace(-0.200 + 0.005, 0.2, 4000),
                        hist / hist.sum(),
                        semilogy=True,
                        xt='(nn-true)/true',
                        label=name,
                        xr=[-0.1, 0.25],
                        color=color,
                        linewidth=lw)
            ax[0, 0].legend(fontsize='x-small')

            # cumulative fractional error of all pixels
            err = np.abs(err)
            hist, bins = np.histogram(err.flatten(),
                                      bins=np.logspace(-7, 3, 501))
            plots.plotl(ax[0, 1],
                        np.logspace(-7, 3, 500),
                        np.cumsum(hist) / float(hist.sum()),
                        xt='nn/true',
                        label=name,
                        color=color,
                        linewidth=lw)
            ax[0, 1].set_ylabel('Cumulative fraction, all pixels')

            # get percentiles across models at each wavelength
            p = [50, 95, 99]
            perc = np.percentile(err, p, axis=0)
            npix = perc.shape[1]
            for i in range(3):
                plots.plotl(ax2[i],
                            np.arange(npix),
                            perc[i, :],
                            color=color,
                            linewidth=lw,
                            xt='Pixel number')
                ax2[i].text(0.05,
                            0.9,
                            'error at {:d} percentile'.format(p[i]),
                            transform=ax2[i].transAxes)

            # cumulative of 50, 95 and 99 percentile across models
            hist, bins = np.histogram(perc[0, :], bins=np.logspace(-7, 3, 501))
            plots.plotl(ax[1, 0],
                        np.logspace(-7, 3, 500),
                        np.cumsum(hist) / float(hist.sum()),
                        color=color,
                        ls=':',
                        linewidth=lw)
            hist, bins = np.histogram(perc[1, :], bins=np.logspace(-7, 3, 501))
            plots.plotl(ax[1, 0],
                        np.logspace(-7, 3, 500),
                        np.cumsum(hist) / float(hist.sum()),
                        color=color,
                        linewidth=lw,
                        ls='--')
            # solid line: 99th percentile
            hist, bins = np.histogram(perc[2, :], bins=np.logspace(-7, 3, 501))
            plots.plotl(ax[1, 0],
                        np.logspace(-7, 3, 500),
                        np.cumsum(hist) / float(hist.sum()),
                        color=color,
                        linewidth=lw)
            ax[1, 0].set_ylabel('Cumulative, fraction of pixels')

            # cumulative of 50, 95 and 99 percentile across wavelengths
            p = [50, 95, 99, 100]
            perc = np.percentile(err, p, axis=1)
            hist, bins = np.histogram(perc[0, :], bins=np.logspace(-7, 3, 501))
            plots.plotl(ax[1, 1],
                        np.logspace(-7, 3, 500),
                        np.cumsum(hist) / float(hist.sum()),
                        color=color,
                        ls=':',
                        linewidth=lw)
            hist, bins = np.histogram(perc[1, :], bins=np.logspace(-7, 3, 501))
            plots.plotl(ax[1, 1],
                        np.logspace(-7, 3, 500),
                        np.cumsum(hist) / float(hist.sum()),
                        color=color,
                        linewidth=lw,
                        ls='--')
            # solid line: 99th percentile
            hist, bins = np.histogram(perc[2, :], bins=np.logspace(-7, 3, 501))
            plots.plotl(ax[1, 1],
                        np.logspace(-7, 3, 500),
                        np.cumsum(hist) / float(hist.sum()),
                        color=color,
                        linewidth=lw)
            ax[1, 1].set_ylabel('Cumulative, fraction of models')

            for ix, iy in zip([1, 0, 1], [0, 1, 1]):
                ax[iy, ix].set_xlim(0., 0.01)
                ax[iy, ix].set_ylim(0., 1.0)
                ax[iy, ix].set_xlabel('|(nn-true)/true|')
                ax[iy, ix].set_xscale('log')
                ax[iy, ix].set_xlim(1.e-4, 0.01)

            # Kiel diagram plots color-coded
            if lw == 3:
                # color-code by value of 50, 95, and 99 percentile of wavelengths for each model
                p = [50, 95, 99]
                perc_mod = np.percentile(err, p, axis=1)
                dx = np.random.uniform(size=len(gd)) * 50 - 25
                dy = np.random.uniform(size=len(gd)) * 0.2 - 0.1
                for i in range(3):
                    plots.plotc(ax3[i, 0],
                                labels[gd, 0] + dx,
                                labels[gd, 1] + dy,
                                perc_mod[i, :],
                                xr=[8000, 3000],
                                yr=[6, -1],
                                zr=[0, 0.1],
                                xt='Teff',
                                yt='log g')
                    ax3[i, 0].text(0.1,
                                   0.9,
                                   'error at {:d} percentile'.format(p[i]),
                                   transform=ax3[i, 0].transAxes)
                # color-code by fraction of pixels worse than 0.01
                for i, thresh in enumerate([0.01, 0.05, 0.1]):
                    mask = copy.copy(err)
                    mask[mask <= thresh] = 0
                    mask[mask > thresh] = 1
                    bdfrac = mask.sum(axis=1) / mask.shape[1]
                    axim = plots.plotc(ax3[i, 1],
                                       labels[gd, 0] + dx,
                                       labels[gd, 1] + dy,
                                       bdfrac,
                                       xr=[8000, 3000],
                                       yr=[6, -1],
                                       zr=[0, 0.1],
                                       xt='Teff')
                    ax3[i,
                        1].text(0.1,
                                0.9,
                                'Fraction of pixels> {:4.2f}'.format(thresh),
                                transform=ax3[i, 1].transAxes)
                cax = plt.axes([0.05, 0.03, 0.9, 0.02])
                fig3.colorbar(axim, cax=cax, orientation='horizontal')

    fig.tight_layout()
    plt.draw()
    fig.savefig(file + '_' + model + '_1.png')
    fig2.savefig(file + '_' + model + '_2.png')
    fig3.savefig(file + '_' + model + '_3.png')
    pdb.set_trace()
    plt.close()
    plt.close()
    plt.close()
    plt.close()
    return nn, true, labels
Code Example #13
File: source.py Project: astronomyk/ScopeSim
    def _from_table(self, tbl, spectra):
        # Give every row unit weight when the table has no weight column,
        # then shift the spectrum references past the spectra already stored.
        if "weight" not in tbl.colnames:
            tbl.add_column(Column(name="weight", data=np.ones(len(tbl))))
        tbl["ref"] += len(self.spectra)
        self.fields += [tbl]
        self.spectra += spectra
Code Example #14
File: hdlls.py Project: li-jr/igmspec
def hdf5_adddata(hdf,
                 sname,
                 meta,
                 debug=False,
                 chk_meta_only=False,
                 mk_test_file=False):
    """ Append HD-LLS data to the h5 file

    Parameters
    ----------
    hdf : hdf5 pointer
    sname : str
      Survey name
    meta : Table
      Survey meta table
    debug : bool, optional
    chk_meta_only : bool, optional
      Only check meta file; will not write
    mk_test_file : bool, optional
      Generate the debug test file for Travis

    Returns
    -------

    """
    from specdb import defs
    # Add Survey
    print("Adding {:s} survey to DB".format(sname))
    hdlls_grp = hdf.create_group(sname)
    # Load up
    Rdicts = defs.get_res_dicts()
    mike_meta = grab_meta_mike()
    mike_coord = SkyCoord(ra=mike_meta['RA_GROUP'],
                          dec=mike_meta['DEC_GROUP'],
                          unit='deg')
    # Checks
    if sname != 'HD-LLS_DR1':
        raise IOError("Not expecting this survey..")
    full_coord = SkyCoord(ra=meta['RA_GROUP'],
                          dec=meta['DEC_GROUP'],
                          unit='deg')

    # Build spectra (and parse for meta)
    if mk_test_file:
        meta = meta[0:3]
    nspec = len(meta)
    max_npix = 210000  # Just needs to be large enough
    data = init_data(max_npix, include_co=False)
    # Init
    full_idx = np.zeros(len(meta), dtype=int)
    spec_set = hdf[sname].create_dataset('spec',
                                         data=data,
                                         chunks=True,
                                         maxshape=(None, ),
                                         compression='gzip')
    spec_set.resize((nspec, ))
    Rlist = []
    wvminlist = []
    wvmaxlist = []
    dateobslist = []
    npixlist = []
    instrlist = []
    gratinglist = []
    telelist = []
    # Loop
    members = glob.glob(os.getenv('RAW_IGMSPEC') + '/{:s}/*fits'.format(sname))
    kk = -1
    for jj, member in enumerate(members):
        if 'HD-LLS_DR1.fits' in member:
            continue
        kk += 1
        # Extract
        f = member
        hdu = fits.open(f)
        # Parse name
        fname = f.split('/')[-1]
        mt = np.where(meta['SPEC_FILE'] == fname)[0]
        if mk_test_file and (jj >= 3):
            continue
        if len(mt) != 1:
            pdb.set_trace()
            raise ValueError("HD-LLS: No match to spectral file?!")
        else:
            print('loading {:s}'.format(fname))
            full_idx[kk] = mt[0]
        # npix
        head = hdu[0].header
        # Some fiddling about
        for key in ['wave', 'flux', 'sig']:
            data[key] = 0.  # Important to init (for compression too)
        # Double check
        if kk == 0:
            assert hdu[1].name == 'ERROR'
            assert hdu[2].name == 'WAVELENGTH'
        # Write
        spec = lsio.readspec(f)  # Handles dummy pixels in ESI
        npix = spec.npix
        if npix > max_npix:
            raise ValueError(
                "Not enough pixels in the data... ({:d})".format(npix))
        data['flux'][0][:npix] = spec.flux.value
        data['sig'][0][:npix] = spec.sig.value
        data['wave'][0][:npix] = spec.wavelength.value
        #data['flux'][0][:npix] = hdu[0].data
        #data['sig'][0][:npix] = hdu[1].data
        #data['wave'][0][:npix] = hdu[2].data
        # Meta
        wvminlist.append(np.min(data['wave'][0][:npix]))
        wvmaxlist.append(np.max(data['wave'][0][:npix]))
        npixlist.append(npix)
        if 'HIRES' in fname:
            instrlist.append('HIRES')
            telelist.append('Keck-I')
            gratinglist.append('BOTH')
            try:
                Rlist.append(set_resolution(head))
            except ValueError:
                # A few by hand (pulled from Table 1)
                if 'J073149' in fname:
                    Rlist.append(Rdicts['HIRES']['C5'])
                    tval = datetime.datetime.strptime('2006-01-04', '%Y-%m-%d')
                elif 'J081435' in fname:
                    Rlist.append(Rdicts['HIRES']['C1'])
                    tval = datetime.datetime.strptime('2006-12-26',
                                                      '%Y-%m-%d')  # 2008 too
                elif 'J095309' in fname:
                    Rlist.append(Rdicts['HIRES']['C1'])
                    tval = datetime.datetime.strptime('2005-03-18', '%Y-%m-%d')
                elif 'J113418' in fname:
                    Rlist.append(Rdicts['HIRES']['C5'])
                    tval = datetime.datetime.strptime('2006-01-05', '%Y-%m-%d')
                elif 'J135706' in fname:
                    Rlist.append(Rdicts['HIRES']['C5'])
                    tval = datetime.datetime.strptime('2007-04-28', '%Y-%m-%d')
                elif 'J155556.9' in fname:
                    Rlist.append(Rdicts['HIRES']['C5'])
                    tval = datetime.datetime.strptime('2005-04-15', '%Y-%m-%d')
                elif 'J212329' in fname:
                    Rlist.append(Rdicts['HIRES']['E3'])
                    tval = datetime.datetime.strptime('2006-08-20', '%Y-%m-%d')
                else:
                    pdb.set_trace()
            else:
                tval = datetime.datetime.strptime(head['DATE-OBS'], '%Y-%m-%d')
            dateobslist.append(datetime.datetime.strftime(tval, '%Y-%m-%d'))
        elif 'ESI' in fname:
            instrlist.append('ESI')
            telelist.append('Keck-II')
            gratinglist.append('ECH')
            try:
                Rlist.append(set_resolution(head))
            except ValueError:
                print("Using R=6,000 for ESI")
                Rlist.append(6000.)
            try:
                tval = datetime.datetime.strptime(head['DATE'], '%Y-%m-%d')
            except KeyError:
                if ('J223438.5' in fname) or ('J231543' in fname):
                    tval = datetime.datetime.strptime('2004-09-11', '%Y-%m-%d')
                else:
                    pdb.set_trace()
            dateobslist.append(datetime.datetime.strftime(tval, '%Y-%m-%d'))
        elif 'MIKE' in fname:  # APPROXIMATE
            if 'MIKEr' in fname:
                instrlist.append('MIKEr')
                gratinglist.append('RED')
            elif 'MIKEb' in fname:
                instrlist.append('MIKEb')
                gratinglist.append('BLUE')
            else:
                instrlist.append('MIKE')
                gratinglist.append('BOTH')
            telelist.append('Magellan')
            sep = full_coord[mt[0]].separation(mike_coord)
            imin = np.argmin(sep)
            if sep[imin] > 1. * u.arcsec:
                pdb.set_trace()
                raise ValueError("Bad separation in MIKE")
            # R and Date
            Rlist.append(25000. / mike_meta['Slit'][imin])
            tval = datetime.datetime.strptime(mike_meta['DATE-OBS'][imin],
                                              '%Y-%b-%d')
            dateobslist.append(datetime.datetime.strftime(tval, '%Y-%m-%d'))
        elif 'MAGE' in fname:  # APPROXIMATE
            instrlist.append('MagE')
            if 'Clay' in head['TELESCOP']:
                telelist.append('Magellan/Clay')
            else:
                telelist.append('Magellan/Baade')
            gratinglist.append('N/A')
            Rlist.append(set_resolution(head))
            dateobslist.append(head['DATE-OBS'])
        else:  # unrecognized instrument
            raise ValueError("UH OH")
        # Only way to set the dataset correctly
        if chk_meta_only:
            continue
        spec_set[kk] = data

    # Add columns
    meta = meta[full_idx]
    nmeta = len(meta)
    meta.add_column(Column([2000.] * nmeta, name='EPOCH'))
    meta.add_column(Column(npixlist, name='NPIX'))
    meta.add_column(
        Column([str(date) for date in dateobslist], name='DATE-OBS'))
    meta.add_column(Column(wvminlist, name='WV_MIN'))
    meta.add_column(Column(wvmaxlist, name='WV_MAX'))
    meta.add_column(Column(Rlist, name='R'))
    meta.add_column(Column(np.arange(nmeta, dtype=int), name='GROUP_ID'))
    meta.add_column(Column(gratinglist, name='GRATING'))
    meta.add_column(Column(instrlist, name='INSTR'))
    meta.add_column(Column(telelist, name='TELESCOPE'))
    # v02
    meta.rename_column('GRATING', 'DISPERSER')

    # Add HDLLS meta to hdf5
    if chk_meta(meta):
        if chk_meta_only:
            pdb.set_trace()
        hdf[sname]['meta'] = meta
    else:
        raise ValueError("meta file failed")
    # References
    refs = [
        dict(url='http://adsabs.harvard.edu/abs/2015ApJS..221....2P',
             bib='prochaska+15'),
    ]
    jrefs = ltu.jsonify(refs)
    hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
    #
    return
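
The bulk column-appending at the end, reduced to a standalone sketch with made-up values:

from astropy.table import Table, Column

meta = Table({'SPEC_FILE': ['a.fits', 'b.fits']})
meta.add_column(Column([2000.] * len(meta), name='EPOCH'))
meta.add_column(Column(['ECH', 'BOTH'], name='GRATING'))
meta.rename_column('GRATING', 'DISPERSER')
print(meta.colnames)  # ['SPEC_FILE', 'EPOCH', 'DISPERSER']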
Code Example #15
scriptDir = os.environ['SCRIPTDIR']

maskDir = os.path.join(analysisDir, 'CO')
if not os.path.exists(maskDir):
    os.mkdir(maskDir)

otherDataDir = os.path.join(analysisDir, 'ancillary_data')
#otherDataDir = os.environ['OTHERDATA']

# get list of galaxies in degas DR1
degas_table = Table.read(os.path.join(scriptDir, "degas_base.fits"))

# create a column for logging masks.
if 'MASK' in degas_table.colnames:
    degas_table.remove_column('MASK')
degas_table.add_column(Column(np.full_like(degas_table['NAME'], ''),
                              dtype='S15'),
                       name='MASK')

idx_dr1 = degas_table['DR1'] == 1

# Extract list of galaxies via fancy list comprehension

# heracles
heracles_list = [
    os.path.basename(image).split('_')[0] for image in glob.glob(
        os.path.join(otherDataDir, 'heracles', '*gauss15_fixed.fits'))
]

# bima song
bima_list = [
    os.path.basename(image).split('_')[0] for image in glob.glob(
Code Example #16
output_filename = 'fit_features.csv'

#Read in previous table or define new table for writing output
if (not args.overwrite) and (os.path.exists(output_filename)):
    tbdata = asc.read(output_filename)
else:
    tbdata = Table(names=[
        'name', 'component', 'fwhm', 'absorption', 'min_wave',
        'min_wave_err_L', 'min_wave_err_R', 'pew', 'pew_err', 'filename',
        'redshift'
    ],
                   dtype=('S15', 'i8', 'f8', 'S2', 'f8', 'f8', 'f8', 'f8',
                          'f8', 'S50', 'f8'))
#Add a column for rest wavelength if provided
if (args.rest_wave is not None) and ('rest_wave' not in tbdata.colnames):
    tbdata.add_column(Column(name='rest_wave', dtype='f8'))

#define line name
if args.name is not None:
    name = args.name
else:
    name = 'line1'
    linenum = 1
    while name in tbdata['name']:
        name = 'line{}'.format(linenum)
        linenum += 1

spec_feat.define_feature(
    name,
    absorption=absorption,
    overwrite=True,  #overwrite the yaml files with fit info
Code Example #17
File: wjp_cat.py Project: tskisner/LSS
def lss_catalog(nobj=1):
    """
    Create an empty 'lsscatalog' table.
    
    Parameters
    ----------
    nobj : :class:`int`
        Number of targets.

    Returns
    -------
    lsscatalog : :class:`astropy.table.Table`
        LSS catalog Table.
    """
    from astropy.table import Table, Column

    # One row per target.
    lsscatalog = Table()
    
    lsscatalog.add_column(Column(name='TARGETID',        length=nobj, dtype='int64'))

    lsscatalog.add_column(Column(name='RA',              length=nobj, dtype='float64'))
    lsscatalog.add_column(Column(name='DEC',             length=nobj, dtype='float64'))
    
    lsscatalog.add_column(Column(name='DESI_TARGET',     length=nobj, dtype='int64'))
    lsscatalog.add_column(Column(name='BGS_TARGET',      length=nobj, dtype='int64'))
    
    # --  Targeting --
    lsscatalog.add_column(Column(name='IN_IMAGING',      length=nobj, dtype='i2'))    # INSIDE IMAGING FOOTPRINT
    lsscatalog.add_column(Column(name='IN_DESI',         length=nobj, dtype='i2'))    # INSIDE SPEC.   FOOTPRINT (DATE STAMPED ACCEPTABLE TILES)
    lsscatalog.add_column(Column(name='IN_COSMO',        length=nobj, dtype='int64')) # Inside cosmo footprint, e.g. EBV << 1.
    
    lsscatalog.add_column(Column(name='ANG_VETO_FLAG',   length=nobj, dtype='int64'))
    lsscatalog.add_column(Column(name='Z_VETO_FLAG',     length=nobj, dtype='int64'))
    
    # Assignment history.  Priority changes?
    lsscatalog.add_column(Column(name='PRIORITY_INIT',   length=nobj, dtype='float64'))
    lsscatalog.add_column(Column(name='SUBPRIORITY',     length=nobj, dtype='float64'))
        
    lsscatalog.add_column(Column(name='GOOD_FIBERS',     length=nobj, dtype='int64', shape=MAX_NFIBER))
    lsscatalog.add_column(Column(name='GOOD_TILES',      length=nobj, dtype='int64', shape=MAX_NTILE))

    # For each of GOOD_FIBERS, what was the PRIORITY_INIT of the target to which it was assigned.  
    lsscatalog.add_column(Column(name='FIBPRIORITY',     length=nobj, dtype='float64', shape=MAX_NFIBER))
    
    lsscatalog.add_column(Column(name='NGOOD_FIBERS',    length=nobj, dtype='int64'))
    lsscatalog.add_column(Column(name='NGOOD_TILES',     length=nobj, dtype='int64'))
    
    # Effective assignment completeness, non-pairwise.                                                                                                                                                                                 
    lsscatalog.add_column(Column(name='ASSIGN_IIP',      length=nobj, dtype='float64'))
    
    # --  Spectroscopic --
    lsscatalog.add_column(Column(name='Z',               length=nobj, dtype='float64'))
    lsscatalog.add_column(Column(name='ZERR',            length=nobj, dtype='float64'))
    lsscatalog.add_column(Column(name='ZWARN',           length=nobj, dtype='int64'))
    lsscatalog.add_column(Column(name='SPECTYPE',        length=nobj, dtype='S16'))

    lsscatalog.add_column(Column(name='COSMO_BLINDCHI',  length=nobj, dtype='float64'))
    lsscatalog.add_column(Column(name='COSMO_BLINDNZ',   length=nobj, dtype='float64'))
    lsscatalog.add_column(Column(name='COSMO_BLINDWFKP', length=nobj, dtype='float64'))
    
    # 1. / Imaging completeness [0., 1.]  
    lsscatalog.add_column(Column(name='IMAGE_WGHT',      length=nobj, dtype='float64'))

    # 1. / Spectroscopic completeness [0., 1.]
    lsscatalog.add_column(Column(name='SPEC_WGHT',      length=nobj, dtype='float64'))

    initialise_lsscatalog(lsscatalog)

    return  lsscatalog
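
The pattern used throughout lss_catalog, a Column built with length and dtype but no data, yields a zero-initialized column; in isolation:

from astropy.table import Table, Column

t = Table()
t.add_column(Column(name='TARGETID', length=4, dtype='int64'))
t.add_column(Column(name='RA', length=4, dtype='float64'))
print(t)  # four rows of zeros, ready to be filled in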
Code Example #18
def quickcat(tilefiles, targets, truth, zcat=None, obsconditions=None, perfect=False):
    """
    Generates quick output zcatalog

    Args:
        tilefiles : list of fiberassign tile files that were observed
        targets : astropy Table of targets
        truth : astropy Table of input truth with columns TARGETID, TRUEZ, and TRUETYPE
        zcat (optional): input zcatalog Table from previous observations
        obsconditions (optional): Table or ndarray with observing conditions from surveysim
        perfect (optional): if True, treat spectro pipeline as perfect with input=output,
            otherwise add noise and zwarn!=0 flags

    Returns:
        zcatalog astropy Table based upon input truth, plus ZERR, ZWARN,
        NUMOBS, and TYPE columns
    """
    #- convert to Table for easier manipulation
    if not isinstance(truth, Table):
        truth = Table(truth)

    #- Count how many times each target was observed for this set of tiles
    print('{} QC Reading {} tiles'.format(asctime(), len(tilefiles)))
    nobs = Counter()
    targets_in_tile = {}
    tileids = list()
    for infile in tilefiles:
        fibassign, header = fits.getdata(infile, 'FIBER_ASSIGNMENTS', header=True)
        tile_id = header['TILEID']
        tileids.append(tile_id)

        ii = (fibassign['TARGETID'] != -1)  #- targets with assignments
        nobs.update(fibassign['TARGETID'][ii])
        targets_in_tile[tile_id] = fibassign['TARGETID'][ii]

    #- Trim obsconditions to just the tiles that were observed
    if obsconditions is not None:
        ii = np.in1d(obsconditions['TILEID'], tileids)
        if not np.all(ii):
            obsconditions = obsconditions[ii]
        assert len(obsconditions) > 0

    #- Sort obsconditions to match order of tiles
    #- This might not be needed, but is fast for O(20k) tiles and may
    #- prevent future surprises if code expects them to be row aligned
    tileids = np.array(tileids)
    if (obsconditions is not None) and \
       (np.any(tileids != obsconditions['TILEID'])):
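        #- k = argsort(i) inverts the tileids sort, so obsconditions[j[k]]
        #- ends up row-aligned with tileids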
        i = np.argsort(tileids)
        j = np.argsort(obsconditions['TILEID'])
        k = np.argsort(i)
        obsconditions = obsconditions[j[k]]
        assert np.all(tileids == obsconditions['TILEID'])

    #- Trim truth down to just ones that have already been observed
    print('{} QC Trimming truth to just observed targets'.format(asctime()))
    obs_targetids = np.array(list(nobs.keys()))
    iiobs = np.in1d(truth['TARGETID'], obs_targetids)
    truth = truth[iiobs]
    targets = targets[iiobs]

    #- Construct initial new z catalog
    print('{} QC Constructing new redshift catalog'.format(asctime()))
    newzcat = Table()
    newzcat['TARGETID'] = truth['TARGETID']
    if 'BRICKNAME' in truth.dtype.names:
        newzcat['BRICKNAME'] = truth['BRICKNAME']
    else:
        newzcat['BRICKNAME'] = np.zeros(len(truth), dtype=(str, 8))

    #- Copy TRUESPECTYPE -> SPECTYPE so that we can change without altering original
    newzcat['SPECTYPE'] = truth['TRUESPECTYPE'].copy()

    #- Add ZERR and ZWARN
    print('{} QC Adding ZERR and ZWARN'.format(asctime()))
    nz = len(newzcat)
    if perfect:
        newzcat['Z'] = truth['TRUEZ'].copy()
        newzcat['ZERR'] = np.zeros(nz, dtype=np.float32)
        newzcat['ZWARN'] = np.zeros(nz, dtype=np.int32)
    else:
        # get the observational conditions for the current tilefiles
        if obsconditions is None:
            obsconditions = get_median_obsconditions(tileids)

        # get the redshifts
        z, zerr, zwarn = get_observed_redshifts(targets, truth, targets_in_tile, obsconditions)
        newzcat['Z'] = z  #- update with noisy redshift
        newzcat['ZERR'] = zerr
        newzcat['ZWARN'] = zwarn

    #- Add numobs column
    print('{} QC Adding NUMOBS column'.format(asctime()))
    newzcat.add_column(Column(name='NUMOBS', length=nz, dtype=np.int32))
    for i in range(nz):
        newzcat['NUMOBS'][i] = nobs[newzcat['TARGETID'][i]]

    #- Merge previous zcat with newzcat
    print('{} QC Merging previous zcat'.format(asctime()))
    if zcat is not None:
        #- don't modify original
        #- Note: this uses copy on write for the columns to be memory
        #- efficient while still letting us modify a column if needed
        zcat = zcat.copy()

        #- targets that are in both zcat and newzcat
        repeats = np.in1d(zcat['TARGETID'], newzcat['TARGETID'])

        #- update numobs in both zcat and newzcat
        ii = np.in1d(newzcat['TARGETID'], zcat['TARGETID'][repeats])
        orig_numobs = zcat['NUMOBS'][repeats].copy()
        new_numobs = newzcat['NUMOBS'][ii].copy()
        zcat['NUMOBS'][repeats] += new_numobs
        newzcat['NUMOBS'][ii] += orig_numobs

        #- for repeats that had ZWARN flags in the original zcat,
        #- replace the original row with the new measurement
        replace = repeats & (zcat['ZWARN'] != 0)
        jj = np.in1d(newzcat['TARGETID'], zcat['TARGETID'][replace])
        zcat[replace] = newzcat[jj]

        #- trim newzcat to ones that shouldn't override original zcat
        discard = np.in1d(newzcat['TARGETID'], zcat['TARGETID'])
        newzcat = newzcat[~discard]

        #- Should be non-overlapping now
        assert not np.any(np.in1d(zcat['TARGETID'], newzcat['TARGETID']))

        #- merge them
        newzcat = vstack([zcat, newzcat])

    #- check for duplicates
    targetids = newzcat['TARGETID']
    assert len(np.unique(targetids)) == len(targetids)

    #- Metadata for header
    newzcat.meta['EXTNAME'] = 'ZCATALOG'

    print('{} QC done'.format(asctime()))
    return newzcat
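
A minimal usage sketch (the tile paths and extra tables here are hypothetical placeholders, not part of the original code):

tiles = ['tile_00001.fits', 'tile_00002.fits']            # hypothetical fiberassign outputs
zcat1 = quickcat(tiles, targets, truth, perfect=True)     # noiseless first pass
zcat2 = quickcat(more_tiles, targets, truth, zcat=zcat1)  # merge a later epoch into the catalog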
Code example #19
File: test_ecsv.py Project: Gabriel-p/astropy
    'uint64', 'float16', 'float32', 'float64', 'float128', 'str'
]
if os.name == 'nt' or sys.maxsize <= 2**32:
    DTYPES.remove('float128')

T_DTYPES = Table()

for dtype in DTYPES:
    if dtype == 'bool':
        data = np.array([False, True, False])
    elif dtype == 'str':
        data = np.array(['ab 0', 'ab, 1', 'ab2'])
    else:
        data = np.arange(3, dtype=dtype)
    c = Column(data,
               unit='m / s',
               description='descr_' + dtype,
               meta={'meta ' + dtype: 1})
    T_DTYPES[dtype] = c

T_DTYPES.meta['comments'] = ['comment1', 'comment2']

# Corresponds to simple_table()
SIMPLE_LINES = [
    '# %ECSV 1.0', '# ---', '# datatype:', '# - {name: a, datatype: int64}',
    '# - {name: b, datatype: float64}', '# - {name: c, datatype: string}',
    '# schema: astropy-2.0', 'a b c', '1 1.0 c', '2 2.0 d', '3 3.0 e'
]
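
As a quick sanity check (not part of the original test module), these lines parse back through the ECSV reader:

t = Table.read(SIMPLE_LINES, format='ascii.ecsv')  # read the literal ECSV text
assert t.colnames == ['a', 'b', 'c']               # column names survive the round trip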


def test_write_simple():
    """
Code example #20
mpl.rc("font", family="serif", size=16)
mpl.rc("axes", linewidth = 1.0)
mpl.rc("lines", linewidth = 1.0)
mpl.rc("xtick.major", pad = 8, width = 1)
mpl.rc("ytick.major", pad = 8, width = 1)
mpl.rc("xtick.minor", width = 1)
mpl.rc("ytick.minor", width = 1)

hmscFullFile = './hmscList_full_20161218.txt'
hmscFull = ascii.read(hmscFullFile)
hmscFull = hmscFull[~hmscFull['Dist_B'].mask]

coord = coords.SkyCoord(hmscFull['ra'].data, hmscFull['dec'].data, 
	frame = 'fk5', unit = (u.deg, u.deg))

lDet = Column(coord.galactic.l.rad, name = 'lDet')
bDet = Column(coord.galactic.b.rad, name = 'bDet')

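# Project each source onto the Galactic plane; 8.34 is presumably the adopted
# Sun-to-Galactic-centre distance in kpc (Reid et al. 2014).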
xDet = hmscFull['Dist_B']*np.cos(bDet)*np.sin(lDet)
yDet = 8.34 - hmscFull['Dist_B']*np.cos(bDet)*np.cos(lDet)

#ap.make_rgb_cube(['./mwRed.fits',
#	'./mwGreen.fits',
#	'./mwBlue.fits'],
#	'./mwRGB.fits', system = 'GAL')
#
#ap.make_rgb_image('./mwRGB.fits',
#	'./mwRGB.png',embed_avm_tags = False)

fig = plt.figure(figsize = (8,8))
Code example #21
    def process(self):
        self.info('Processing has been started')
        for image in self.images_list:
            self.info('Processing image: {}'.format(image.name))
            apertures, sigma_values_table = self._create_apertures(
                image, self.stars_coordinates[str(image.savart)], image.shape)

            out_table = []
            counts_tab = []
            counts_error_tab = []

            for aperture, sigma_value in zip(apertures, sigma_values_table):
                rawflux_table = aperture_photometry(image.data, aperture[0])
                bkgflux_table = aperture_photometry(image.data, aperture[1])
                phot_table = hstack([rawflux_table, bkgflux_table],
                                    table_names=['raw', 'bkg'])

                if self.config_section.get('bkg_annulus'):
                    self.info(
                        'Mean background value from annulus has been used.')
                    bkg_mean = (phot_table['aperture_sum_bkg'] /
                                aperture[1].area())
                    bkg_sum = bkg_mean * aperture[0].area()
                    self.info('Mean background value\n {}'.format(bkg_mean))
                else:
                    self.info(
                        'Mean background value from mask sigma clipped '
                        'stats has been used.')
                    bkg_mean = sigma_value.median
                    bkg_sum = bkg_mean * aperture[0].area()
                    self.info('Mean background value\n {}'.format(bkg_mean))

                final_sum = phot_table['aperture_sum_raw'] - bkg_sum
                phot_table['residual_aperture_sum'] = final_sum

                final_sum_error = self._calc_phot_error(
                    image.hdr, aperture, phot_table, bkgflux_table,
                    sigma_value.std)

                phot_table.add_column(
                    Column(name='residual_aperture_err_sum',
                           data=[final_sum_error]))

                phot_table['xcenter_raw'].shape = 1
                phot_table['ycenter_raw'].shape = 1
                phot_table['xcenter_bkg'].shape = 1
                phot_table['ycenter_bkg'].shape = 1

                out_table.append(phot_table)
                counts_tab.append(final_sum)
                counts_error_tab.append(final_sum_error)

            out_table = vstack(out_table)

            # self.measurements.append(
            #     SavartCounts(image.savart, image.jd,
            #         counts_tab, counts_error_tab))
            if image.savart not in self.measurements:
                self.measurements[image.savart] = [
                    SavartCounts(image.savart, image.jd, counts_tab,
                                 counts_error_tab)
                ]
            else:
                self.measurements[image.savart].append(
                    SavartCounts(image.savart, image.jd, counts_tab,
                                 counts_error_tab))

            if self.config_section.get('plot_images'):
                self._make_image_plot(image.data, apertures, image.name)

            self._save_image_output(out_table, image.name + '.csv')

        self.measurements = OrderedDict(sorted(self.measurements.items()))
        self._save_polarizaton_results()
Code example #22
File: scrape_qc.py Project: jrnorth/mwa-capstone
    t = Table(meta={'name': k, 'current': astropy.time.Time.now().iso})
    for i in range(len(column_data)):
        c = column_data[i]
        if '[' in c['type']:
            l = int(c['type'].split('[')[1].replace(']', ''))
            d = c['type'].split('[')[0]
        else:
            d = c['type']
            l = 1
        if d == 'object':
            print('Do not know how to handle this column:')
            print('\t%s' % c['column'])
            continue
        if c['type'].startswith('S'):
            columns[c['column']] = Column(name=c['column'],
                                          dtype=d,
                                          length=len(rows))

        else:
            if c['unit'] != 'None':
                columns[c['column']] = Column(name=c['column'],
                                              dtype=d,
                                              unit=c['unit'],
                                              shape=(l, ),
                                              length=len(rows))
            else:
                columns[c['column']] = Column(name=c['column'],
                                              dtype=d,
                                              shape=(l, ),
                                              length=len(rows))
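
Presumably the scraped excerpt continues by attaching the prepared columns to the table; a hypothetical sketch of that step:

    for name in columns:            # attach the prepared columns in order
        t.add_column(columns[name])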
Code example #23
import sunpy.data.test
from sunpy.io.special import srs

testpath = sunpy.data.test.rootdir

filenames = [{'file': '20150906SRS.txt', 'rows': 5},
             {'file': '20150306SRS.txt', 'rows': 4},
             {'file': '20150101SRS.txt', 'rows': 9}]

COORDINATES = [{'text': 'N10W05', 'latitude': 10, 'longitude': 5},
               {'text': 'N89E00', 'latitude': 89, 'longitude': 0},
               {'text': 'S33E02', 'latitude': -33, 'longitude': -2},
               {'text': 'S01', 'latitude': -1, 'longitude': None}]

LOCATION = Column(data=[x['text'] for x in COORDINATES], name='Location')
LONGLAT = Table()
LONGLAT.add_column(MaskedColumn(data=[x['longitude'] for x in COORDINATES], name='Longitude',
                                unit=u.deg, mask=[x['longitude'] is None for x in COORDINATES]))
LONGLAT.add_column(MaskedColumn(data=[x['latitude'] for x in COORDINATES], name='Latitude',
                                unit=u.deg))


@pytest.mark.parametrize("path, number_of_rows",
                         [(os.path.join(testpath, elem['file']), elem['rows'])
                          for elem in filenames])
def test_number_of_rows(path, number_of_rows):
    table = srs.read_srs(path)
    assert len(table) == number_of_rows

Code example #24
                            'oiii_5007_flux', 'h_beta_flux')

new_column = []
# For each point, check whether it lies above or below the semi-empirical
# line defined by vo_line_X, where X is n ([NII]), s ([SII]) or o ([OI]).
for i in range(len(x_real)):
    if y_real[i] > plot.vo_line_n(x_real[i]) or x_real[i] > 0.3:
        # above the semi-empirical line: galaxies hosting an AGN
        new_column.append('AGN')
    else:
        # below the semi-empirical line: starburst galaxies
        new_column.append('SBG')

aa = Column(new_column, name='classification')
data_table.add_column(aa, index=0)  # Insert before the first table column

ascii.write(
    data_table,
    output='/Users/users/mulder/astrods/project/sample_trainingsetwithclass.csv',
    format='csv',
    overwrite=True)
'''
plot.plot_vo87(data,'nii_6584_flux', 'h_alpha_flux', 'oiii_5007_flux', 'h_beta_flux', plot.vo_line_n, name='nii_test')
plot_vo87(data,'nii_6584_flux', 'h_alpha_flux', 'oiii_5007_flux', 'h_beta_flux', vo_line_n, name='nii')
plot_vo87(data,'sii_6717_flux', 'h_alpha_flux', 'oiii_5007_flux', 'h_beta_flux', vo_line_s, name='sii')
plot_vo87(data,'oi_6300_flux', 'h_alpha_flux', 'oiii_5007_flux', 'h_beta_flux', vo_line_o, name='oi')

h_beta_flux
Code example #25
    #bgfn = paths.merge_spath("{name}_spw{ii}_background_mean_7m12m.fits")

    data[name] = spectral_overlays.spectral_overlays(fn, name=name,
                                                     freq_name_mapping=freq_name_mapping,
                                                     frequencies=frequencies,
                                                     yoffset=yoffset,
                                                     minvelo=minvelo,
                                                     maxvelo=maxvelo,
                                                     suffix="_7m12m_hires",
                                                     #background_fn=bgfn,
                                                    )

firstentry = list(data.keys())[0]
colnames = list(data[firstentry].keys())
coltypes = {k:type(data[firstentry][k]) for k in colnames}
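# Build one column per key: numeric per-source entries become Quantity columns,
# while string-valued entries stay plain string Columns.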
names = Column([name for name in data], name='SourceID')
data_list = [Column(u.Quantity([data[name][key] for name in names]), name=key)
             if coltypes[key] not in (str,)
             else Column([data[name][key] for name in names], name=key)
             for key in colnames]
data_list.insert(0, names)


tbl = Table(data_list)
tbl.sort('SourceID')
tbl.write(paths.tpath("core_velocities_7m12m_hires.ipac"), format="ascii.ipac",
          overwrite=True)


# 12m only
data = {}
Code example #26
    # if you want the fits file name only without the full path then
    # fitsNames.append(os.path.split(fitsName)[1])

# Create a table container. 
# http://docs.astropy.org/en/stable/table/construct_table.html
# One trick is to use the data types in the first "values" to let astropy guess datatypes.
# to use this trick, you need to specify the column names in the table
row0 = [dict(zip(keys, values[0]))]
t = Table(row0, names=keys)

# now add all the other rows. again, because dict didn't preserve column order, you have to repeat
# the dict here.
for i in range(1, len(values)):
    t.add_row(values[i])

# add the filenames column
#t.add_column
new_column = Column(name='fitsName', data=fitsNames)
t.add_column(new_column, 0)

# save the file
# http://docs.astropy.org/en/stable/table/io.html
t.write('table.dat', format='ascii.ipac')


Code example #27
File: core.py Project: ruizca/astroquery
    def _parse_staging_request_page(self, data_list_page):
        """
        Parse pages like this one:
        https://almascience.eso.org/rh/requests/anonymous/786572566

        that include links to data sets that have been requested and staged

        Parameters
        ----------
        data_list_page : requests.Response object

        """

        root = BeautifulSoup(data_list_page.content, 'html5lib')

        data_table = root.findAll('table', class_='list', id='report')[0]
        columns = {'uid': [], 'URL': [], 'size': []}
        for tr in data_table.findAll('tr'):
            tds = tr.findAll('td')

            # Cannot check class if it is not defined
            cl = 'class' in tr.attrs

            if (len(tds) > 1 and 'uid' in tds[0].text
                    and (cl and 'Level' in tr['class'][0])):
                # New Style
                text = tds[0].text.strip().split()
                if text[0] in ('Asdm', 'Member'):
                    uid = text[-1]
            elif len(tds) > 1 and 'uid' in tds[1].text:
                # Old Style
                uid = tds[1].text.strip()
            elif cl and tr['class'][0] == 'Level_1':
                raise ValueError("Heading was found when parsing the download "
                                 "page but it was not parsed correctly")

            if len(tds) > 3 and (cl and tr['class'][0] == 'fileRow'):
                # New Style
                size, unit = re.search(r'(-|[0-9\.]*)([A-Za-z]*)',
                                       tds[2].text).groups()
                href = tds[1].find('a')
                if size == '':
                    # this is a header row
                    continue
                authorized = ('access_authorized.png'
                              in tds[3].findChild('img')['src'])
                if authorized:
                    columns['uid'].append(uid)
                    if href and 'href' in href.attrs:
                        columns['URL'].append(href.attrs['href'])
                    else:
                        columns['URL'].append('None_Found')
                    unit = (u.Unit(unit) if unit in ('GB', 'MB') else
                            u.Unit('kB') if 'kb' in unit.lower() else 1)
                    try:
                        columns['size'].append(float(size) * u.Unit(unit))
                    except ValueError:
                        # size is probably a string?
                        columns['size'].append(-1 * u.byte)
                    log.log(level=5,
                            msg="Found a new-style entry.  "
                            "size={0} uid={1} url={2}".format(
                                size, uid, columns['URL'][-1]))
                else:
                    log.warning("Access to {0} is not authorized.".format(uid))
            elif len(tds) > 3 and tds[2].find('a'):
                # Old Style
                href = tds[2].find('a')
                size, unit = re.search(r'([0-9\.]*)([A-Za-z]*)',
                                       tds[3].text).groups()
                columns['uid'].append(uid)
                columns['URL'].append(href.attrs['href'])
                unit = (u.Unit(unit) if unit in ('GB', 'MB') else
                        u.Unit('kB') if 'kb' in unit.lower() else 1)
                columns['size'].append(float(size) * u.Unit(unit))
                log.log(level=5,
                        msg="Found an old-style entry.  "
                        "size={0} uid={1} url={2}".format(
                            size, uid, columns['URL'][-1]))

        columns['size'] = u.Quantity(columns['size'], u.Gbyte)

        if len(columns['uid']) == 0:
            raise RemoteServiceError(
                "No valid UIDs were found in the staged data table. "
                "Please include {0} in a bug report.".format(
                    self._staging_log['data_list_url']))

        tbl = Table([Column(name=k, data=v) for k, v in iteritems(columns)])

        return tbl
Code example #28
File: test_sdss.py Project: welterde/astroquery
    raise requests.exceptions.Timeout('timeout')


def data_path(filename):
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    return os.path.join(data_dir, filename)


# Test Case: A Seyfert 1 galaxy
coords = commons.ICRSCoordGenerator('0h8m05.63s +14d50m23.3s')

# Test Case: list of coordinates
coords_list = [coords, coords]

# Test Case: Column of coordinates
coords_column = Column(coords_list, name='coordinates')


def test_sdss_spectrum(patch_get, patch_get_readable_fileobj, coords=coords):
    xid = sdss.core.SDSS.query_region(coords, spectro=True)
    sp = sdss.core.SDSS.get_spectra(matches=xid)


def test_sdss_spectrum_mjd(patch_get, patch_get_readable_fileobj):
    sp = sdss.core.SDSS.get_spectra(plate=2345, fiberID=572)


def test_sdss_spectrum_coords(patch_get,
                              patch_get_readable_fileobj,
                              coords=coords):
    sp = sdss.core.SDSS.get_spectra(coords)
Code example #29
                         alpha=0.2)

        ax1.set_ylabel(r'$\Gamma/ \Delta^2 \ {\rm 1/Myr}$', size=20)
        ax1.set_xlabel(r'Scale [pc]', size=20)
        ax1.set_xscale('log')
        ax1.legend(loc='upper right', prop={'size': 15}, ncol=2)
        f1.savefig('Plots/' + name_file +
                   '_%05d_percentiles.png' % int(radius))
        plt.close(f1)

        os.system('rclone copy Plots/' + name_file +
                  '_%05d_percentiles.png uchile:Double_Power/Percentiles/' %
                  int(radius))

        tabla = Table()
        tabla['resolution'] = Column(np.array(res),
                                     description='resolution in pc')

        tabla['p10'] = Column(np.array(p10))
        tabla['p20'] = Column(np.array(p20))
        tabla['p30'] = Column(np.array(p30))
        tabla['p40'] = Column(np.array(p40))
        tabla['p50'] = Column(np.array(p50))
        tabla['p60'] = Column(np.array(p60))
        tabla['p70'] = Column(np.array(p70))
        tabla['p80'] = Column(np.array(p80))
        tabla['p90'] = Column(np.array(p90))

        tabla['q10'] = Column(np.array(q10 + delta))
        tabla['q20'] = Column(np.array(q20 + delta))
        tabla['q30'] = Column(np.array(q30 + delta))
        tabla['q40'] = Column(np.array(q40 + delta))
Code example #30
# interpolates projected size on the sky
radius_2_arcsec = interp1d(n.arange(0.00001,6.5,0.001), lc_setup.cosmoMD.arcsec_per_kpc_proper( n.arange(0.00001,6.5,0.001) ).value)

radius_dev = re_dev(stellar_mass)* radius_2_arcsec(f1['/sky_position/redshift_R'].value[gal][ok])
radius_exp = re_exp(stellar_mass)* radius_2_arcsec(f1['/sky_position/redshift_R'].value[gal][ok])

radius = radius_exp

dm_dev = -2.5*n.log10(f_14_dev(radius))
dm_exp = -2.5*n.log10(f_14_exp(radius))
# initialize the fibermag with the exponential profile
fiber_mag = rmag + dm_exp
"""

t = Table()
t.add_column(Column(name='SMHMR_mass', data=mass, unit='log10(Msun)'))
t.add_column(
    Column(name='star_formation_rate', data=log_sfr, unit='log10(Msun/yr)'))
t.add_column(Column(name='is_quiescent', data=QU, unit=''))
t.add_column(Column(name='LX_hard', data=n.log10(gal_LX), unit='log10(erg/s)'))
t.add_column(Column(name='mag_abs_r', data=n.zeros_like(mass), unit='mag'))
t.add_column(Column(name='mag_r', data=n.zeros_like(mass), unit='mag'))

t.write(path_2_galaxy_file, overwrite=True)
print('done', time.time() - t0, 's')

### Option: FIGURES ###
if make_figure:
    import matplotlib
    matplotlib.use('Agg')
    matplotlib.rcParams.update({'font.size': 14})
Code example #31
File: templates.py Project: akremin/desisim
    def make_templates(self, zrange=(0.5,1.1), zmagrange=(19.0,20.5),
                       no_colorcuts=False):
        """Build Monte Carlo set of LRG spectra/templates.

        This function chooses random subsets of the LRG continuum spectra and
        finally normalizes the spectrum to a specific z-band magnitude.

        TODO (@moustakas): add a LINER- or AGN-like emission-line spectrum 

        Args:
          zrange (float, optional): Minimum and maximum redshift range.  Defaults
            to a uniform distribution between (0.5,1.1).
          zmagrange (float, optional): Minimum and maximum DECam z-band (AB)
            magnitude range.  Defaults to a uniform distribution between (19,20.5).
          no_colorcuts (bool, optional): Do not apply the fiducial rzW1
            color-cuts (default False).
        
        Returns:
          outflux (numpy.ndarray): Array [nmodel,npix] of observed-frame spectra [erg/s/cm2/A]. 
          meta (astropy.Table): Table of meta-data for each output spectrum [nmodel].

        Raises:

        """
        from astropy.table import Table, Column
        from desisim.io import write_templates
        from desispec.interpolation import resample_flux

        # Initialize the output flux array and metadata Table.
        outflux = np.zeros([self.nmodel,len(self.wave)]) # [erg/s/cm2/A]

        meta = Table()
        meta['TEMPLATEID'] = Column(np.zeros(self.nmodel,dtype='i4'))
        meta['REDSHIFT'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['GMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['RMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['ZMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['W1MAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['ZMETAL'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['AGE'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['D4000'] = Column(np.zeros(self.nmodel,dtype='f4'))

        meta['AGE'].unit = 'Gyr'

        comments = dict(
            TEMPLATEID = 'template ID',
            REDSHIFT = 'object redshift',
            GMAG = 'DECam g-band AB magnitude',
            RMAG = 'DECam r-band AB magnitude',
            ZMAG = 'DECam z-band AB magnitude',
            W1MAG = 'WISE W1-band AB magnitude',
            ZMETAL = 'stellar metallicity',
            AGE = 'time since the onset of star formation',
            D4000 = '4000-Angstrom break'
        )

        nobj = 0
        nbase = len(self.basemeta)
        nchunk = min(self.nmodel,500)

        Cuts = TargetCuts()
        while nobj<=(self.nmodel-1):
            # Choose a random subset of the base templates
            chunkindx = self.rand.randint(0,nbase,nchunk)  # randint's upper bound is exclusive

            # Assign uniform redshift and z-magnitude distributions.
            redshift = self.rand.uniform(zrange[0],zrange[1],nchunk)
            zmag = self.rand.uniform(zmagrange[0],zmagrange[1],nchunk)

            # Unfortunately we have to loop here.
            for ii, iobj in enumerate(chunkindx):
                zwave = self.basewave*(1.0+redshift[ii])
                restflux = self.baseflux[iobj,:] # [erg/s/cm2/A @10pc]

                znorm = 10.0**(-0.4*zmag[ii])/self.zfilt.get_maggies(zwave,restflux)
                flux = restflux*znorm # [erg/s/cm2/A, @redshift[ii]]

                # [grzW1]flux are in nanomaggies
                zflux = 10.0**(-0.4*(zmag[ii]-22.5))                      
                gflux = self.gfilt.get_maggies(zwave,flux)*10**(0.4*22.5) 
                rflux = self.rfilt.get_maggies(zwave,flux)*10**(0.4*22.5) 
                w1flux = self.w1filt.get_maggies(zwave,flux)*10**(0.4*22.5) 

                if no_colorcuts:
                    rzW1mask = [True]
                else:
                    rzW1mask = [Cuts.LRG(rflux=rflux,zflux=zflux,w1flux=w1flux)]

                if all(rzW1mask):
                    if ((nobj+1)%10)==0:
                        print('Simulating {} template {}/{}'.format(self.objtype,nobj+1,self.nmodel))
                    outflux[nobj,:] = resample_flux(self.wave,zwave,flux)

                    meta['TEMPLATEID'][nobj] = nobj
                    meta['REDSHIFT'][nobj] = redshift[ii]
                    meta['GMAG'][nobj] = -2.5*np.log10(gflux)+22.5
                    meta['RMAG'][nobj] = -2.5*np.log10(rflux)+22.5
                    meta['ZMAG'][nobj] = zmag[ii]
                    meta['W1MAG'][nobj] = -2.5*np.log10(w1flux)+22.5
                    meta['ZMETAL'][nobj] = self.basemeta['ZMETAL'][iobj]
                    meta['AGE'][nobj] = self.basemeta['AGE'][iobj]
                    meta['D4000'][nobj] = self.basemeta['D4000'][iobj]

                    nobj = nobj+1

                # If we have enough models get out!
                if nobj>=(self.nmodel-1):
                    break

        return outflux, self.wave, meta
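
A hypothetical call, assuming `lrg` is an instance of this template class:

flux, wave, meta = lrg.make_templates(zrange=(0.6, 1.0), zmagrange=(19.0, 20.0))
print(meta['REDSHIFT', 'ZMAG'][:5])  # inspect the first few simulated objects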
Code example #32
File: sky.py Project: neXyon/pynephoscope
class SkyCatalog:
	def __init__(self, sun_only = False, moon_only = False):
		if sun_only:
			self.ephemerides = [ephem.Sun()]
			self.data = None
		elif moon_only:
			self.ephemerides = [ephem.Moon()]
			self.data = None
		else:
			self.ephemerides = [ephem.Venus(), ephem.Mars(), ephem.Jupiter(), ephem.Saturn(), ephem.Moon(), ephem.Sun()]
			
			self.data = ascii.read(Configuration.star_catalog_file, guess=False, format='fixed_width_no_header', names=('HR', 'Name', 'DM', 'HD', 'SAO', 'FK5', 'IRflag', 'r_IRflag', 'Multiple', 'ADS', 'ADScomp', 'VarID', 'RAh1900', 'RAm1900', 'RAs1900', 'DE-1900', 'DEd1900', 'DEm1900', 'DEs1900', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'GLON', 'GLAT', 'Vmag', 'n_Vmag', 'u_Vmag', 'B-V', 'u_B-V', 'U-B', 'u_U-B', 'R-I', 'n_R-I', 'SpType', 'n_SpType', 'pmRA', 'pmDE', 'n_Parallax', 'Parallax', 'RadVel', 'n_RadVel', 'l_RotVel', 'RotVel', 'u_RotVel', 'Dmag', 'Sep', 'MultID', 'MultCnt', 'NoteFlag'), col_starts=(0, 4, 14, 25, 31, 37, 41, 42, 43, 44, 49, 51, 60, 62, 64, 68, 69, 71, 73, 75, 77, 79, 83, 84, 86, 88, 90, 96, 102, 107, 108, 109, 114, 115, 120, 121, 126, 127, 147, 148, 154, 160, 161, 166, 170, 174, 176, 179, 180, 184, 190, 194, 196), col_ends=(3, 13, 24, 30, 36, 40, 41, 42, 43, 48, 50, 59, 61, 63, 67, 68, 70, 72, 74, 76, 78, 82, 83, 85, 87, 89, 95, 101, 106, 107, 108, 113, 114, 119, 120, 125, 126, 146, 147, 153, 159, 160, 165, 169, 173, 175, 178, 179, 183, 189, 193, 195, 196))
			
			# remove masked rows
			
			self.data = self.data[:][~np.ma.getmaskarray(self.data['DE-'])]
		
	def setLocation(self, location):
		self.location = location
		
	def setTime(self, time):
		self.time = time
		
	def calculate(self):
		ephem_location = ephem.Observer()
		ephem_location.lat = self.location.latitude.to(u.rad) / u.rad
		ephem_location.lon = self.location.longitude.to(u.rad) / u.rad
		ephem_location.elevation = self.location.height / u.meter
		ephem_location.date = ephem.Date(self.time.datetime)

		if self.data is None:
			self.alt = Latitude([], unit=u.deg)
			self.az = Longitude([], unit=u.deg)
			self.names = Column([], dtype=np.str)
			self.vmag = Column([])
		else:
			ra = Longitude((self.data['RAh'], self.data['RAm'], self.data['RAs']), u.h)
			dec = Latitude((np.core.defchararray.add(self.data['DE-'], self.data['DEd'].astype(str)).astype(int), self.data['DEm'], self.data['DEs']), u.deg)
			c = SkyCoord(ra, dec, frame='icrs')
			altaz = c.transform_to(AltAz(obstime=self.time, location=self.location))
			self.alt = altaz.alt
			self.az = altaz.az

			self.names = self.data['Name']
			self.vmag = self.data['Vmag']

		for ephemeris in self.ephemerides:
			ephemeris.compute(ephem_location)
			self.vmag = self.vmag.insert(0, ephemeris.mag)
			self.alt = self.alt.insert(0, (ephemeris.alt.znorm * u.rad).to(u.deg))
			self.az = self.az.insert(0, (ephemeris.az * u.rad).to(u.deg))
			self.names = self.names.insert(0, ephemeris.name)
		
		return self.names, self.vmag, self.alt, self.az

	def filter(self, min_alt, max_mag):
		show = self.alt >= min_alt

		names = self.names[show]
		vmag = self.vmag[show]
		alt = self.alt[show]
		az = self.az[show]

		show_mags = vmag < max_mag

		names = names[show_mags]
		vmag = vmag[show_mags]
		alt = alt[show_mags]
		az = az[show_mags]
		
		return names, vmag, alt, az
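
A brief usage sketch (the location and time values are illustrative; EarthLocation and Time are the astropy classes):

catalog = SkyCatalog()
catalog.setLocation(EarthLocation(lat=48.1 * u.deg, lon=11.6 * u.deg, height=500 * u.m))
catalog.setTime(Time.now())
names, vmag, alt, az = catalog.calculate()
names, vmag, alt, az = catalog.filter(30 * u.deg, 4.0)  # above 30 deg altitude and brighter than mag 4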
Code example #33
File: tableio.py Project: nudomarinero/LSMTool
def createTable(outlines, metaDict, colNames, colDefaults):
    """
    Creates an astropy table from inputs.

    Parameters
    ----------
    outlines : list of str
        Input lines
    metaDict : dict
        Input meta data
    colNames : list of str
        Input column names
    colDefaults : list
        Input column default values

    Returns
    -------
    table : astropy.table.Table object

    """
    # Before loading table into an astropy Table object, set lengths of Name,
    # Patch, and Type columns to 100 characters
    log = logging.getLogger('LSMTool.Load')

    converters = {}
    nameCol = 'col{0}'.format(colNames.index('Name')+1)
    converters[nameCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]
    typeCol = 'col{0}'.format(colNames.index('Type')+1)
    converters[typeCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]
    if 'Patch' in colNames:
        patchCol = 'col{0}'.format(colNames.index('Patch')+1)
        converters[patchCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]

    log.debug('Creating table...')
    table = Table.read('\n'.join(outlines), guess=False, format='ascii.no_header', delimiter=',',
        names=colNames, comment='#', data_start=0, converters=converters)

    # Convert spectral index values from strings to arrays.
    if 'SpectralIndex' in table.keys():
        log.debug('Converting spectral indices...')
        specOld = table['SpectralIndex'].data.tolist()
        specVec = []
        maskVec = []
        maxLen = 0
        for l in specOld:
            try:
                if type(l) is float or type(l) is int:
                    maxLen = 1
                else:
                    specEntry = [float(f) for f in l.split(';')]
                    if len(specEntry) > maxLen:
                        maxLen = len(specEntry)
            except:
                pass
        log.debug('Maximum number of spectral-index terms in model: {0}'.format(maxLen))
        for l in specOld:
            try:
                if type(l) is float or type(l) is int:
                    specEntry = [float(l)]
                    specMask = [False]
                else:
                    specEntry = [float(f) for f in l.split(';')]
                    specMask = [False] * len(specEntry)
                while len(specEntry) < maxLen:
                    specEntry.append(0.0)
                    specMask.append(True)
                specVec.append(specEntry)
                maskVec.append(specMask)
            except:
                specVec.append([0.0]*maxLen)
                maskVec.append([True]*maxLen)
        specCol = MaskedColumn(name='SpectralIndex', data=np.array(specVec, dtype=float))
        specCol.mask = maskVec
        specIndx = table.keys().index('SpectralIndex')
        table.remove_column('SpectralIndex')
        table.add_column(specCol, index=specIndx)

    # Convert RA and Dec to Angle objects
    log.debug('Converting RA...')
    RARaw = table['Ra'].data.tolist()
    RACol = Column(name='Ra', data=RA2Angle(RARaw))
    def raformat(val):
        return Angle(val, unit='degree').to_string(unit='hourangle', sep=':')
    RACol.format = raformat
    RAIndx = table.keys().index('Ra')
    table.remove_column('Ra')
    table.add_column(RACol, index=RAIndx)

    log.debug('Converting Dec...')
    DecRaw = table['Dec'].data.tolist()
    DecCol = Column(name='Dec', data=Dec2Angle(DecRaw))
    def decformat(val):
        return Angle(val, unit='degree').to_string(unit='degree', sep='.')
    DecCol.format = decformat
    DecIndx = table.keys().index('Dec')
    table.remove_column('Dec')
    table.add_column(DecCol, index=DecIndx)

    def fluxformat(val):
        return '{0:0.3f}'.format(val)
    table.columns['I'].format = fluxformat

    # Set column units and default values
    for i, colName in enumerate(colNames):
        log.debug("Setting units for column '{0}' to {1}".format(
            colName, allowedColumnUnits[colName.lower()]))
        table.columns[colName].unit = allowedColumnUnits[colName.lower()]

        if hasattr(table.columns[colName], 'filled') and colDefaults[i] is not None:
            fillVal = colDefaults[i]
            if colName == 'SpectralIndex':
                while len(fillVal) < maxLen:
                    fillVal.append(0.0)
            log.debug("Setting default value for column '{0}' to {1}".
                format(colName, fillVal))
            table.columns[colName].fill_value = fillVal
    table.meta = metaDict

    return table
Code example #34
File: parse.py Project: linetools/linetools
def line_data(nrows=1):
    """ Defines the dict (and/or Table) for spectral line Data

    Parameters
    ----------
    nrows : int, optional
      Number of rows in Table [default = 1]

    Notes
    -----
    Group definition:
       *    0: None
       *    1: "All" ISM (intended to be all atomic lines ever observed)
       *    2: Strong ISM
       *    4: HI Lyman series
       *    8: H2
       *   16: CO
       *   32: EUV
       *   64: Galaxy Emission
       *  128: Galaxy Absorption
       *  256: AGN
       *  512: ??
       * 1024: User1 (Reserved)
       * 2048: User2 (Reserved)
    """
    ldict = {
        'name': ' '*20,       # Name
        'wrest': 0.*u.AA,     # Rest Wavelength (Quantity)
        'f':  0.,             # Oscillator strength
        'Ej': 0./u.cm,        # Energy of lower level (relative to ground state)
        'Ek': 0./u.cm,        # Energy of upper level (relative to ground state)
        'Ex': 0./u.cm,        # Excitation energy (cm^-1)
        'A': 0./u.s,          # Einstein coefficient
        'gj': 0,              # Lower statistical weight (2J+1)
        'gk': 0,              # Upper statistical weight (2J+1)
        'gamma': 0./u.s,      # Sum of A
        'nj': 0,              # Orbital level of lower state (or vibrational level)
        'nk': 0,              # Orbital level of upper state (or vibrational level)
        'Jj': 0.,             # Tot ang mom (z projection) of lower state (or rotation level)
        'Jk': 0.,             # Tot ang mom (z projection) of upper state (or rotation level)
        'el': 0,              # Electronic transition (2=Lyman (B-X), 3=Werner (C-X)) 
        'Z': 0,               # Atomic number (for atoms)
        'Am': 0,              # Mass number (often written as "A"; only used for D)
        'ion': 0,             # Ionic state (1=Neutral)
        'mol': ' '*10,        # Molecular name (H2, HD, CO, C13O)
        'Ref': ' '*50,        # References
        'group': 0            # Flag for grouping
        }

    # Table
    clms = []
    for key in ldict.keys():
        if type(ldict[key]) is Quantity:
            clm = Column( ([ldict[key].value]*nrows), name=key)
            clm.unit = ldict[key].unit
        else:
            clm = Column( [ldict[key]]*nrows, name=key)
        # Append
        clms.append(clm)

    # make it a masked Table so we can deal with Galaxy
    # emission and ISM absorption simultaneously by masking
    # out what does not make sense in one case or the other
    tbl = Table(clms, masked=True)

    return ldict, tbl
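
For example (a usage sketch, not from the original module):

ldict, tbl = line_data(nrows=3)  # dict template plus a 3-row masked Table
print(tbl['wrest'].unit)         # Angstrom, carried over from the Quantity entry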
Code example #35
def read_samples(filename, path=None, tablename=POSTERIOR_SAMPLES):
    """Read an HDF5 sample chain file.

    Parameters
    ----------
    filename : str
        The path of the HDF5 file on the filesystem.
    path : str, optional
        The path of the dataset within the HDF5 file.
    tablename : str, optional
        The name of table to search for recursively within the HDF5 file.
        By default, search for 'posterior_samples'.

    Returns
    -------
    chain : `astropy.table.Table`
        The sample chain as an Astropy table.

    Examples
    --------
    Test reading a file written using the Python API:

    >>> import os.path
    >>> import tempfile
    >>> table = Table([
    ...     Column(np.ones(10), name='foo', meta={'vary': FIXED}),
    ...     Column(np.arange(10), name='bar', meta={'vary': LINEAR}),
    ...     Column(np.arange(10) * np.pi, name='bat', meta={'vary': CIRCULAR}),
    ...     Column(np.arange(10), name='baz', meta={'vary': OUTPUT})
    ... ])
    >>> with tempfile.TemporaryDirectory() as dir:
    ...     filename = os.path.join(dir, 'test.hdf5')
    ...     write_samples(table, filename, path='foo/bar/posterior_samples')
    ...     len(read_samples(filename))
    10

    Test reading a file that was written using the LAL HDF5 C API:

    >>> from pkg_resources import resource_filename
    >>> filename = resource_filename(__name__, 'tests/data/test.hdf5')
    >>> table = read_samples(filename)
    >>> table.colnames
    ['uvw', 'opq', 'lmn', 'ijk', 'def', 'abc', 'ghi', 'rst']

    """
    with h5py.File(filename, 'r') as f:
        if path is not None:  # Look for a given path
            table = f[path]
        else:  # Look for a given table name
            table = _find_table(f, tablename)
        table = Table.read(table)

    # Restore vary types.
    for i, column in enumerate(table.columns.values()):
        column.meta['vary'] = table.meta['FIELD_{0}_VARY'.format(i)]

    # Restore fixed columns from table attributes.
    for key, value in table.meta.items():
        # Skip attributes from H5TB interface
        # (https://www.hdfgroup.org/HDF5/doc/HL/H5TB_Spec.html).
        if key == 'CLASS' or key == 'VERSION' or key == 'TITLE' or \
                key.startswith('FIELD_'):
            continue
        table.add_column(Column([value] * len(table), name=key,
                         meta={'vary': FIXED}))

    # Delete remaining table attributes.
    table.meta.clear()

    # Normalize column names.
    _remap_colnames(table)

    # Done!
    return table
Code example #36
File: flux_sensitivity.py Project: tuoyl/fermipy
def run_flux_sensitivity(**kwargs):

    index = kwargs.get('index', 2.0)
    sedshape = kwargs.get('sedshape', 'PowerLaw')
    cutoff = kwargs.get('cutoff', 1e3)
    curvindex = kwargs.get('curvindex', 1.0)
    beta = kwargs.get('beta', 0.0)
    dmmass = kwargs.get('DMmass', 100.0)
    dmchannel = kwargs.get('DMchannel', 'bb')
    emin = kwargs.get('emin', 10**1.5)
    emax = kwargs.get('emax', 10**6.0)
    nbin = kwargs.get('nbin', 18)
    glon = kwargs.get('glon', 0.0)
    glat = kwargs.get('glat', 0.0)
    ltcube_filepath = kwargs.get('ltcube', None)
    galdiff_filepath = kwargs.get('galdiff', None)
    isodiff_filepath = kwargs.get('isodiff', None)
    galdiff_fit_filepath = kwargs.get('galdiff_fit', None)
    isodiff_fit_filepath = kwargs.get('isodiff_fit', None)
    wcs_npix = kwargs.get('wcs_npix', 40)
    wcs_cdelt = kwargs.get('wcs_cdelt', 0.5)
    wcs_proj = kwargs.get('wcs_proj', 'AIT')
    map_type = kwargs.get('map_type', None)
    spatial_model = kwargs.get('spatial_model', 'PointSource')
    spatial_size = kwargs.get('spatial_size', 1E-2)

    obs_time_yr = kwargs.get('obs_time_yr', None)
    event_class = kwargs.get('event_class', 'P8R2_SOURCE_V6')
    min_counts = kwargs.get('min_counts', 3.0)
    ts_thresh = kwargs.get('ts_thresh', 25.0)
    nside = kwargs.get('hpx_nside', 16)
    output = kwargs.get('output', None)

    event_types = [['FRONT', 'BACK']]

    if sedshape == 'PowerLaw':
        fn = spectrum.PowerLaw([1E-13, -index], scale=1E3)
    elif sedshape == 'PLSuperExpCutoff':
        fn = spectrum.PLSuperExpCutoff([1E-13, -index, cutoff, curvindex],
                                       scale=1E3)
    elif sedshape == 'LogParabola':
        fn = spectrum.LogParabola([1E-13, -index, beta], scale=1E3)
    elif sedshape == 'DM':
        fn = spectrum.DMFitFunction([1E-26, dmmass], chan=dmchannel)
    else:
        raise ValueError('Unrecognized sedshape: %s' % sedshape)

    log_ebins = np.linspace(np.log10(emin), np.log10(emax), nbin + 1)
    ebins = 10**log_ebins
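    # Geometric (log-space) bin centers.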
    ectr = np.exp(utils.edge_to_center(np.log(ebins)))

    c = SkyCoord(glon, glat, unit='deg', frame='galactic')

    if ltcube_filepath is None:

        if obs_time_yr is None:
            raise Exception('No observation time defined.')

        ltc = LTCube.create_from_obs_time(obs_time_yr * 365 * 24 * 3600.)
    else:
        ltc = LTCube.create(ltcube_filepath)
        if obs_time_yr is not None:
            ltc._counts *= obs_time_yr * 365 * \
                24 * 3600. / (ltc.tstop - ltc.tstart)

    gdiff = skymap.Map.create_from_fits(galdiff_filepath)
    gdiff_fit = None
    if galdiff_fit_filepath is not None:
        gdiff_fit = skymap.Map.create_from_fits(galdiff_fit_filepath)

    if isodiff_filepath is None:
        isodiff = utils.resolve_file_path('iso_%s_v06.txt' % event_class,
                                          search_dirs=['$FERMI_DIFFUSE_DIR'])
        isodiff = os.path.expandvars(isodiff)
    else:
        isodiff = isodiff_filepath

    iso = np.loadtxt(isodiff, unpack=True)
    iso_fit = None
    if isodiff_fit_filepath is not None:
        iso_fit = np.loadtxt(isodiff_fit_filepath, unpack=True)

    scalc = SensitivityCalc(gdiff,
                            iso,
                            ltc,
                            ebins,
                            event_class,
                            event_types,
                            gdiff_fit=gdiff_fit,
                            iso_fit=iso_fit,
                            spatial_model=spatial_model,
                            spatial_size=spatial_size)

    # Compute Maps
    map_diff_flux = None
    map_diff_npred = None
    map_int_flux = None
    map_int_npred = None

    map_nstep = 500

    if map_type == 'hpx':

        hpx = HPX(nside, True, 'GAL', ebins=ebins)
        map_diff_flux = HpxMap(np.zeros((nbin, hpx.npix)), hpx)
        map_diff_npred = HpxMap(np.zeros((nbin, hpx.npix)), hpx)
        map_skydir = map_diff_flux.hpx.get_sky_dirs()

        for i in range(0, len(map_skydir), map_nstep):
            s = slice(i, i + map_nstep)
            o = scalc.diff_flux_threshold(map_skydir[s], fn, ts_thresh,
                                          min_counts)
            map_diff_flux.data[:, s] = o['flux'].T
            map_diff_npred.data[:, s] = o['npred'].T

        hpx = HPX(nside, True, 'GAL')
        map_int_flux = HpxMap(np.zeros((hpx.npix)), hpx)
        map_int_npred = HpxMap(np.zeros((hpx.npix)), hpx)
        map_skydir = map_int_flux.hpx.get_sky_dirs()

        for i in range(0, len(map_skydir), map_nstep):
            s = slice(i, i + map_nstep)
            o = scalc.int_flux_threshold(map_skydir[s], fn, ts_thresh,
                                         min_counts)
            map_int_flux.data[s] = o['flux']
            map_int_npred.data[s] = o['npred']

    elif map_type == 'wcs':

        wcs_shape = [wcs_npix, wcs_npix]
        wcs_size = wcs_npix * wcs_npix

        map_diff_flux = Map.create(c,
                                   wcs_cdelt,
                                   wcs_shape,
                                   'GAL',
                                   wcs_proj,
                                   ebins=ebins)
        map_diff_npred = Map.create(c,
                                    wcs_cdelt,
                                    wcs_shape,
                                    'GAL',
                                    wcs_proj,
                                    ebins=ebins)
        map_skydir = map_diff_flux.get_pixel_skydirs()

        for i in range(0, len(map_skydir), map_nstep):
            idx = np.unravel_index(np.arange(i, min(i + map_nstep, wcs_size)),
                                   wcs_shape)
            s = (slice(None), idx[1], idx[0])
            o = scalc.diff_flux_threshold(map_skydir[slice(i, i + map_nstep)],
                                          fn, ts_thresh, min_counts)
            map_diff_flux.data[s] = o['flux'].T
            map_diff_npred.data[s] = o['npred'].T

        map_int_flux = Map.create(c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj)
        map_int_npred = Map.create(c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj)
        map_skydir = map_int_flux.get_pixel_skydirs()

        for i in range(0, len(map_skydir), map_nstep):
            idx = np.unravel_index(np.arange(i, min(i + map_nstep, wcs_size)),
                                   wcs_shape)
            s = (idx[1], idx[0])
            o = scalc.int_flux_threshold(map_skydir[slice(i, i + map_nstep)],
                                         fn, ts_thresh, min_counts)
            map_int_flux.data[s] = o['flux']
            map_int_npred.data[s] = o['npred']

    o = scalc.diff_flux_threshold(c, fn, ts_thresh, min_counts)

    cols = [
        Column(name='e_min', dtype='f8', data=scalc.ebins[:-1], unit='MeV'),
        Column(name='e_ref', dtype='f8', data=o['e_ref'], unit='MeV'),
        Column(name='e_max', dtype='f8', data=scalc.ebins[1:], unit='MeV'),
        Column(name='flux', dtype='f8', data=o['flux'], unit='ph / (cm2 s)'),
        Column(name='eflux', dtype='f8', data=o['eflux'],
               unit='MeV / (cm2 s)'),
        Column(name='dnde',
               dtype='f8',
               data=o['dnde'],
               unit='ph / (MeV cm2 s)'),
        Column(name='e2dnde',
               dtype='f8',
               data=o['e2dnde'],
               unit='MeV / (cm2 s)'),
        Column(name='npred', dtype='f8', data=o['npred'], unit='ph')
    ]

    tab_diff = Table(cols)

    cols = [
        Column(name='index', dtype='f8'),
        Column(name='e_min', dtype='f8', unit='MeV'),
        Column(name='e_ref', dtype='f8', unit='MeV'),
        Column(name='e_max', dtype='f8', unit='MeV'),
        Column(name='flux', dtype='f8', unit='ph / (cm2 s)'),
        Column(name='eflux', dtype='f8', unit='MeV / (cm2 s)'),
        Column(name='dnde', dtype='f8', unit='ph / (MeV cm2 s)'),
        Column(name='e2dnde', dtype='f8', unit='MeV / (cm2 s)'),
        Column(name='npred', dtype='f8', unit='ph'),
        Column(name='ebin_e_min', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='ebin_e_ref', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='ebin_e_max', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='ebin_flux',
               dtype='f8',
               unit='ph / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_eflux',
               dtype='f8',
               unit='MeV / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_dnde',
               dtype='f8',
               unit='ph / (MeV cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_e2dnde',
               dtype='f8',
               unit='MeV / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_npred', dtype='f8', unit='ph', shape=(len(ectr), ))
    ]

    cols_ebounds = [
        Column(name='E_MIN', dtype='f8', unit='MeV', data=ebins[:-1]),
        Column(name='E_MAX', dtype='f8', unit='MeV', data=ebins[1:]),
    ]

    tab_int = Table(cols)
    tab_ebounds = Table(cols_ebounds)

    index = np.linspace(1.0, 5.0, 4 * 4 + 1)

    for g in index:
        fn = spectrum.PowerLaw([1E-13, -g], scale=10**3.5)
        o = scalc.int_flux_threshold(c, fn, ts_thresh, 3.0)
        row = [g]
        for colname in tab_int.columns:
            if colname == 'index':
                continue
            if 'ebin' in colname:
                row += [o['bins'][colname.replace('ebin_', '')]]
            else:
                row += [o[colname]]

        tab_int.add_row(row)

    hdulist = fits.HDUList()
    hdulist.append(fits.table_to_hdu(tab_diff))
    hdulist.append(fits.table_to_hdu(tab_int))
    hdulist.append(fits.table_to_hdu(tab_ebounds))

    hdulist[1].name = 'DIFF_FLUX'
    hdulist[2].name = 'INT_FLUX'
    hdulist[3].name = 'EBOUNDS'

    if map_type is not None:
        hdu = map_diff_flux.create_image_hdu()
        hdu.name = 'MAP_DIFF_FLUX'
        hdulist.append(hdu)
        hdu = map_diff_npred.create_image_hdu()
        hdu.name = 'MAP_DIFF_NPRED'
        hdulist.append(hdu)

        hdu = map_int_flux.create_image_hdu()
        hdu.name = 'MAP_INT_FLUX'
        hdulist.append(hdu)
        hdu = map_int_npred.create_image_hdu()
        hdu.name = 'MAP_INT_NPRED'
        hdulist.append(hdu)

    hdulist.writeto(output, overwrite=True)
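
A hypothetical invocation (all file names are placeholders):

run_flux_sensitivity(glon=120.0, glat=45.0, obs_time_yr=10.0,
                     galdiff='galdiff.fits', output='sensitivity.fits')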
Code example #37
              for ii, el in enumerate(N_in_test)]))).astype('int')
# IDs of the satellite in the galaxy and coordinate file
# for f1, f2, f3
sf = n.hstack((idx_arrays))

# distance to cluster_center
#r_o_rvir = n.hstack((distances)) / (rvir_CLU[CLU_ids] / 1000.)

t = Table()

for col_name in f1.columns.keys():
    print(col_name, )
    if col_name == 'LX_hard':
        print('--not added', )
    else:
        t.add_column(Column(name=col_name, data=f1[col_name][sf]))

for col_name in f2.columns.keys():
    print(col_name, )
    if col_name == 'LX_hard':
        print('--not added', )
    else:
        t.add_column(Column(name=col_name, data=f2[col_name][sf]))

for col_name in f3.columns.keys():
    print(col_name, )
    if col_name in n.array([
            'M200c', 'M500c', 'Xoff', 'b_to_a_500c', 'c_to_a_500c',
            'scale_of_last_MM', 'Acc_Rate_1_Tdyn'
    ]):
        print('--not added', )
Code example #38
    def create_table(self, bubbles, galaxy_props):
        '''
        Create a Table from a list of bubbles
        '''

        # Check to make sure all of the properties are there
        gal_props_checker(galaxy_props)

        # Create columns of the bubble properties
        props = {
            "pa": [u.deg, "Position angle of the bubble"],
            "bubble_type": [u.dimensionless_unscaled, "Type of bubble"],
            "velocity_center": [u.km / u.s, "Center velocity"],
            "velocity_width":
            [u.km / u.s, "Range of velocities bubble"
             " is detected."],
            "eccentricity": [u.dimensionless_unscaled, "Shape eccentricity"],
            "expansion_velocity": [u.km / u.s, "Expansion velocity"],
            "avg_shell_flux_density":
            [u.K * u.km / u.s, "Average flux density in bubble "
             "shell"],
            "total_shell_flux_density":
            [u.K * u.km / u.s, "Total flux density in bubble "
             "shell"],
            "shell_column_density":
            [u.cm**-2, "Average column density in the "
             "shell"],
            "hole_contrast": [
                u.dimensionless_unscaled,
                "Average intensity difference between hole"
                " and shell."
            ],
            "diameter_physical": [u.pc, "Physical diameter"],
            "major_physical": [u.pc, "Physical major radius"],
            "minor_physical": [u.pc, "Physical minor radius"],
            "diameter_angular": [u.deg, "Angular diameter"],
            "major_angular": [u.deg, "Angular major radius"],
            "minor_angular": [u.deg, "Angular minor radius"],
            "galactic_radius": [u.kpc, "Galactic radius of the"
                                " center."],
            "galactic_pa": [u.deg, "Galactic PA of the center."],
            "shell_fraction": [
                u.dimensionless_unscaled,
                "Fraction of shell enclosing the hole."
            ],
            "is_closed": [
                u.dimensionless_unscaled, "Closed or partial "
                "shell (shell fraction > 0.9 is closed)"
            ]
        }

        prop_funcs = {
            "tkin": [u.Myr, "Kinetic age of the bubble.", {}],
            "shell_volume_density": [
                u.cm**-3, "Average hydrogen volume "
                "density in the shell.", {
                    "scale_height": galaxy_props["scale_height"],
                    "inclination": galaxy_props["inclination"]
                }
            ],
            "volume": [
                u.pc**3, "Volume of the hole.", {
                    "scale_height": galaxy_props["scale_height"]
                }
            ],
            "hole_mass": [
                u.Msun, "Inferred mass of the hole from the shell"
                " volume density.", {
                    "scale_height": galaxy_props["scale_height"],
                    "inclination": galaxy_props["inclination"]
                }
            ],
            "formation_energy": [
                u.erg, "Energy required to create the hole.", {
                    "scale_height": galaxy_props["scale_height"],
                    "inclination": galaxy_props["inclination"]
                }
            ]
        }

        columns = []

        # The center coordinates are different, since they're SkyCoords
        columns.append(SkyCoord([bub.center_coordinate for bub in bubbles]))

        # Same for is_closed
        columns.append(
            Column([bub.is_closed for bub in bubbles],
                   unit=u.dimensionless_unscaled,
                   description="Closed or partial shell.",
                   name="closed_shell"))

        # Add the properties
        for name in props:
            unit, descrip = props[name]
            columns.append(
                Column(_has_nan(
                    [getattr(bub, name).to(unit).value for bub in bubbles],
                    name),
                       name=name,
                       description=descrip,
                       unit=unit.to_string()))

        # Add the functions
        for name in prop_funcs:
            unit, descrip, imps = prop_funcs[name]
            columns.append(
                Column(_has_nan([
                    getattr(bub, name)(**imps).to(unit).value
                    for bub in bubbles
                ], name),
                       name=name,
                       description=descrip,
                       unit=unit.to_string()))

        # all_names = ["center_coordinate"] + props.keys() + prop_funcs.keys()

        self.table = Table(columns)
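
For reference, a minimal sketch of the Column/Table pattern used above. The `_has_nan` helper is not shown in the excerpt, so the version below is a hypothetical stand-in that warns about NaNs and returns the data unchanged:

import numpy as np
import astropy.units as u
from astropy.table import Table, Column

def _has_nan(data, name):
    # Hypothetical stand-in for the helper used above: warn if the
    # column data contain NaNs, then return the data unchanged.
    if np.any(np.isnan(data)):
        print("Column '{}' contains NaN values".format(name))
    return data

columns = [Column(_has_nan([1.2, np.nan], "diameter_physical"),
                  name="diameter_physical",
                  description="Physical diameter",
                  unit=u.pc.to_string())]
table = Table(columns)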
Code example #39
def make_obsdata_from_model(model_filename,
                            model_type='tlusty',
                            model_params=None,
                            output_filebase=None,
                            output_path=None,
                            show_plot=False):
    """
    Create the necessary data files (.dat and spectra) from a
    stellar atmosphere model to use as the unreddened
    comparison star in the measure_extinction package

    Parameters
    ----------
    model_filename: string
        name of the file with the stellar atmosphere model spectrum

    model_type: string [default = 'tlusty']
        model type

    model_params: dict of {type: value}
        model parameters
        e.g., {'Teff': 10000.0, 'logg': 4.0, 'Z': 1, 'vturb': 2.0}

    output_filebase: string
        base for the output files
        E.g., output_filebase.dat and output_filebase_stis.fits

    output_path: string
        path to use for output files

    show_plot: boolean
        show a plot of the original and rebinned spectra/photometry
    """

    if output_filebase is None:
        output_filebase = '%s_standard' % (model_filename)

    if output_path is None:
        output_path = '/home/kgordon/Python_git/extstar_data/'

    allowed_model_types = ['tlusty']
    if model_type not in allowed_model_types:
        raise ValueError("%s not an allowed model type" % (model_type))

    # read in the model spectrum
    mspec = ascii.read(model_filename,
                       format='no_header',
                       fast_reader={'exponent_style': 'D'},
                       names=['Freq', 'SFlux'])

    # error in file where the exponent 'D' is missing
    #   means that SFlux is read in as a string
    # solution is to remove the rows with the problem and replace
    #   the fortran 'D' with an 'E' and then convert to floats
    if mspec['SFlux'].dtype != float:
        indxs = [k for k in range(len(mspec)) if 'D' not in mspec['SFlux'][k]]
        if len(indxs) > 0:
            indxs = [k for k in range(len(mspec)) if 'D' in mspec['SFlux'][k]]
            mspec = mspec[indxs]
            new_strs = [cval.replace('D', 'E') for cval in mspec['SFlux'].data]
            mspec['SFlux'] = new_strs
            mspec['SFlux'] = mspec['SFlux'].astype(float)

    # set the units
    mspec['Freq'].unit = u.Hz
    mspec['SFlux'].unit = u.erg / (u.s * u.cm * u.cm * u.Hz)

    # now extract the wave and flux columns
    mfreq = mspec['Freq'].quantity
    mwave = mfreq.to(u.angstrom, equivalencies=u.spectral())
    mflux = mspec['SFlux'].quantity.to(u.erg /
                                       (u.s * u.cm * u.cm * u.angstrom),
                                       equivalencies=u.spectral_density(mfreq))

    # rebin to R=5000 for speed
    #   use a wavelength range that spans FUSE to Spitzer IRS
    wave_r5000, flux_r5000, npts_r5000 = rebin_spectrum(
        mwave.value, mflux.value, 5000, [912., 500000.])

    # save the full spectrum to a binary FITS table
    otable = Table()
    otable['WAVELENGTH'] = Column(wave_r5000, unit=u.angstrom)
    otable['FLUX'] = Column(flux_r5000,
                            unit=u.erg / (u.s * u.cm * u.cm * u.angstrom))
    otable['SIGMA'] = Column(flux_r5000 * 0.0,
                             unit=u.erg / (u.s * u.cm * u.cm * u.angstrom))
    otable['NPTS'] = Column(npts_r5000)
    otable.write("%s/Models/%s_full.fits" % (output_path, output_filebase),
                 overwrite=True)

    # dictionary to save the names of the spectroscopic files
    specinfo = {}

    # create the ultraviolet HST/STIS mock observation
    # first create the spectrum convolved to the STIS low resolution
    # Resolution approximately 1000
    stis_fwhm_pix = 5000. / 1000.
    g = Gaussian1DKernel(stddev=stis_fwhm_pix / 2.355)

    # Convolve data
    nflux = convolve(otable['FLUX'].data, g)

    stis_table = Table()
    stis_table['WAVELENGTH'] = otable['WAVELENGTH']
    stis_table['FLUX'] = nflux
    stis_table['NPTS'] = otable['NPTS']
    stis_table['STAT-ERROR'] = Column(np.full((len(stis_table)), 1.0))
    stis_table['SYS-ERROR'] = otable['SIGMA']
    # UV STIS obs
    rb_stis_uv = merge_stis_obsspec([stis_table], waveregion='UV')
    rb_stis_uv['SIGMA'] = rb_stis_uv['FLUX'] * 0.0
    stis_uv_file = "%s_stis_uv.fits" % (output_filebase)
    rb_stis_uv.write("%s/Models/%s" % (output_path, stis_uv_file),
                     overwrite=True)
    specinfo['STIS'] = stis_uv_file
    # Optical STIS obs
    rb_stis_opt = merge_stis_obsspec([stis_table], waveregion='Opt')
    rb_stis_opt['SIGMA'] = rb_stis_opt['FLUX'] * 0.0
    stis_opt_file = "%s_stis_opt.fits" % (output_filebase)
    rb_stis_opt.write("%s/Models/%s" % (output_path, stis_opt_file),
                      overwrite=True)
    specinfo['STIS_Opt'] = stis_opt_file

    # Spitzer IRS mock observation
    # Resolution approximately 100
    lrs_fwhm_pix = 5000. / 100.
    g = Gaussian1DKernel(stddev=lrs_fwhm_pix / 2.355)

    # Convolve data
    nflux = convolve(otable['FLUX'].data, g)

    lrs_table = Table()
    lrs_table['WAVELENGTH'] = otable['WAVELENGTH']
    lrs_table['FLUX'] = nflux
    lrs_table['NPTS'] = otable['NPTS']
    lrs_table['ERROR'] = Column(np.full((len(lrs_table)), 1.0))

    rb_lrs = merge_irs_obsspec([lrs_table])
    rb_lrs['SIGMA'] = rb_lrs['FLUX'] * 0.0
    lrs_file = "%s_irs.fits" % (output_filebase)
    rb_lrs.write("%s/Models/%s" % (output_path, lrs_file), overwrite=True)
    specinfo['IRS'] = lrs_file

    # compute photometry
    # band_path = "%s/Band_RespCurves/" % output_path
    john_bands = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K']
    john_fnames = ["John%s.dat" % (cband) for cband in john_bands]
    hst_bands = [
        'HST_WFC3_UVIS1_F275W', 'HST_WFC3_UVIS1_F336W', 'HST_WFC3_UVIS1_F475W',
        'HST_WFC3_UVIS1_F814W', 'HST_WFC3_IR_F110W', 'HST_WFC3_IR_F160W',
        'HST_ACS_WFC1_F475W', 'HST_ACS_WFC1_F814W', 'HST_WFPC2_4_F170W'
    ]
    hst_fnames = ['']
    # spitzer_bands = ['IRAC1', 'IRAC2', 'IRAC3', 'IRAC4', 'IRS15', 'MIPS24']
    # spitzer_fnames = ["{}/{}.dat".format(band_path, cband)
    #                   for cband in spitzer_bands]
    bands = john_bands + hst_bands
    band_fnames = john_fnames + hst_fnames
    # bands = john_bands
    # band_fnames = john_fnames

    bandinfo = get_phot(wave_r5000, flux_r5000, bands, band_fnames)

    # create the DAT file
    dat_filename = "%s/Models/%s.dat" % (output_path, output_filebase)
    header_info = [
        "# obsdata created from %s model atmosphere" % model_type,
        "# %s" % (output_filebase),
        "# file created by make_obsdata_from_model.py",
        "model_type = %s" % model_type
    ]
    write_dat_file(dat_filename,
                   bandinfo,
                   specinfo,
                   modelparams=model_params,
                   header_info=header_info)

    if show_plot:
        fig, ax = plt.subplots(figsize=(13, 10))
        # indxs, = np.where(npts_r5000 > 0)
        ax.plot(wave_r5000 * 1e-4, flux_r5000, 'b-')
        ax.plot(bandinfo.waves, bandinfo.fluxes, 'ro')

        indxs, = np.where(rb_stis_uv['NPTS'] > 0)
        ax.plot(rb_stis_uv['WAVELENGTH'][indxs].to(u.micron),
                rb_stis_uv['FLUX'][indxs], 'm-')
        indxs, = np.where(rb_stis_opt['NPTS'] > 0)
        ax.plot(rb_stis_opt['WAVELENGTH'][indxs].to(u.micron),
                rb_stis_opt['FLUX'][indxs], 'g-')
        indxs, = np.where(rb_lrs['NPTS'] > 0)
        ax.plot(rb_lrs['WAVELENGTH'][indxs].to(u.micron),
                rb_lrs['FLUX'][indxs], 'c-')
        ax.set_xscale('log')
        ax.set_yscale('log')
        plt.show()
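
A hedged usage sketch for the function above; the model file name, parameters, and output locations are illustrative only, not taken from the project:

# Hypothetical call: the TLusty flux file and output paths are made up.
make_obsdata_from_model('t15000g175v2.flux',
                        model_type='tlusty',
                        model_params={'Teff': 15000.0, 'logg': 1.75, 'Z': 1},
                        output_filebase='t15000g175_standard',
                        output_path='./extstar_data',
                        show_plot=False)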
Code example #40
File: templates.py Project: akremin/desisim
    def make_templates(self, zrange=(0.6,1.6), rmagrange=(21.0,23.4),
                       oiiihbrange=(-0.5,0.1), oiidoublet_meansig=(0.73,0.05),
                       linesigma_meansig=(1.887,0.175), minoiiflux=1E-17,
                       no_colorcuts=False):
        """Build Monte Carlo set of ELG spectra/templates.

        This function chooses random subsets of the ELG continuum spectra, constructs
        an emission-line spectrum, redshifts, and then finally normalizes the spectrum
        to a specific r-band magnitude.

        TODO (@moustakas): optionally normalize to a g-band magnitude

        Args:
          zrange (float, optional): Minimum and maximum redshift range.  Defaults
            to a uniform distribution between (0.6,1.6).
          rmagrange (float, optional): Minimum and maximum DECam r-band (AB)
            magnitude range.  Defaults to a uniform distribution between (21,23.4).
          oiiihbrange (float, optional): Minimum and maximum logarithmic
            [OIII] 5007/H-beta line-ratio.  Defaults to a uniform distribution
            between (-0.5,0.1).
        
          oiidoublet_meansig (float, optional): Mean and sigma values for the (Gaussian) 
            [OII] 3726/3729 doublet ratio distribution.  Defaults to (0.73,0.05).
          linesigma_meansig (float, optional): *Logarithmic* mean and sigma values for the
            (Gaussian) emission-line velocity width distribution.  Defaults to
            log10-sigma (=1.887+/-0.175) km/s.

          minoiiflux (float, optional): Minimum [OII] 3727 flux [default 1E-17 erg/s/cm2].
            Set this parameter to zero to not have a minimum flux cut.
          no_colorcuts (bool, optional): Do not apply the fiducial grz color
            cuts (default False).
        
        Returns:
          outflux (numpy.ndarray): Array [nmodel,npix] of observed-frame spectra [erg/s/cm2/A]. 
          meta (astropy.Table): Table of meta-data for each output spectrum [nmodel].

        Raises:

        """
        from astropy.table import Table, Column

        from desisim.templates import EMSpectrum
        from desispec.interpolation import resample_flux

        # Initialize the EMSpectrum object with the same wavelength array as
        # the "base" (continuum) templates so that we don't have to resample. 
        EM = EMSpectrum(log10wave=np.log10(self.basewave),seed=self.seed)
       
        # Initialize the output flux array and metadata Table.
        outflux = np.zeros([self.nmodel,len(self.wave)]) # [erg/s/cm2/A]

        meta = Table()
        meta['TEMPLATEID'] = Column(np.zeros(self.nmodel,dtype='i4'))
        meta['REDSHIFT'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['GMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['RMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['ZMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['W1MAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['OIIFLUX'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['EWOII'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['OIIIHBETA'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['OIIDOUBLET'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['LINESIGMA'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['D4000'] = Column(np.zeros(self.nmodel,dtype='f4'))

        meta['OIIFLUX'].unit = 'erg/s/cm2'
        meta['EWOII'].unit = 'Angstrom'
        meta['OIIIHBETA'].unit = 'dex'
        meta['LINESIGMA'].unit = 'km/s'

        comments = dict(
            TEMPLATEID = 'template ID',
            REDSHIFT = 'object redshift',
            GMAG = 'DECam g-band AB magnitude',
            RMAG = 'DECam r-band AB magnitude',
            ZMAG = 'DECam z-band AB magnitude',
            W1MAG = 'WISE W1-band AB magnitude',
            OIIFLUX = '[OII] 3727 flux',
            EWOII = 'rest-frame equivalent width of [OII] 3727',
            OIIIHBETA = 'logarithmic [OIII] 5007/H-beta ratio',
            OIIDOUBLET = '[OII] 3726/3729 doublet ratio',
            LINESIGMA = 'emission line velocity width',
            D4000 = '4000-Angstrom break'
        )

        nobj = 0
        nbase = len(self.basemeta)
        nchunk = min(self.nmodel,500)

        Cuts = TargetCuts()
        while nobj<=(self.nmodel-1):
            # Choose a random subset of the base templates
            chunkindx = self.rand.randint(0,nbase-1,nchunk)

            # Assign uniform redshift and r-magnitude distributions.
            redshift = self.rand.uniform(zrange[0],zrange[1],nchunk)
            rmag = self.rand.uniform(rmagrange[0],rmagrange[1],nchunk)

            # Assume the emission-line priors are uncorrelated.
            oiiihbeta = self.rand.uniform(oiiihbrange[0],oiiihbrange[1],nchunk)
            oiidoublet = self.rand.normal(oiidoublet_meansig[0],
                                          oiidoublet_meansig[1],nchunk)
            linesigma = self.rand.normal(linesigma_meansig[0],
                                         linesigma_meansig[1],nchunk)

            d4000 = self.basemeta['D4000'][chunkindx]
            ewoii = 10.0**(np.polyval([1.1074,-4.7338,5.6585],d4000)+ 
                           self.rand.normal(0.0,0.3)) # rest-frame, Angstrom

            # Unfortunately we have to loop here.
            for ii, iobj in enumerate(chunkindx):
                zwave = self.basewave*(1.0+redshift[ii])

                # Add the continuum and emission-line spectra with the
                # right [OII] flux [erg/s/cm2]
                oiiflux = self.basemeta['OII_CONTINUUM'][iobj]*ewoii[ii] 
                emflux, emwave, emline = EM.spectrum(linesigma=linesigma[ii],
                                                      oiidoublet=oiidoublet[ii],
                                                      oiiihbeta=oiiihbeta[ii],
                                                      oiiflux=oiiflux)
                restflux = self.baseflux[iobj,:] + emflux # [erg/s/cm2/A @10pc]
                rnorm = 10.0**(-0.4*rmag[ii])/self.rfilt.get_maggies(zwave,restflux)
                flux = restflux*rnorm # [erg/s/cm2/A, @redshift[ii]]

                # [grz]flux are in nanomaggies
                rflux = 10.0**(-0.4*(rmag[ii]-22.5))                      
                gflux = self.gfilt.get_maggies(zwave,flux)*10**(0.4*22.5) 
                zflux = self.zfilt.get_maggies(zwave,flux)*10**(0.4*22.5) 
                w1flux = self.w1filt.get_maggies(zwave,flux)*10**(0.4*22.5) 

                zoiiflux = oiiflux*rnorm # [erg/s/cm2]
                oiimask = [zoiiflux>minoiiflux]

                if no_colorcuts:
                    grzmask = [True]
                else:
                    grzmask = [Cuts.ELG(gflux=gflux,rflux=rflux,zflux=zflux)]

                if all(grzmask) and all(oiimask):
                    if ((nobj+1)%10)==0:
                        print('Simulating {} template {}/{}'.format(self.objtype,nobj+1,self.nmodel))
                    outflux[nobj,:] = resample_flux(self.wave,zwave,flux)

                    meta['TEMPLATEID'][nobj] = nobj
                    meta['REDSHIFT'][nobj] = redshift[ii]
                    meta['GMAG'][nobj] = -2.5*np.log10(gflux)+22.5
                    meta['RMAG'][nobj] = rmag[ii]
                    meta['ZMAG'][nobj] = -2.5*np.log10(zflux)+22.5
                    meta['W1MAG'][nobj] = -2.5*np.log10(w1flux)+22.5
                    meta['OIIFLUX'][nobj] = zoiiflux
                    meta['EWOII'][nobj] = ewoii[ii]
                    meta['OIIIHBETA'][nobj] = oiiihbeta[ii]
                    meta['OIIDOUBLET'][nobj] = oiidoublet[ii]
                    meta['LINESIGMA'][nobj] = linesigma[ii]
                    meta['D4000'][nobj] = d4000[ii]

                    nobj = nobj+1

                # If we have enough models get out!
                if nobj>=(self.nmodel-1):
                    break

        return outflux, self.wave, meta
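
The `comments` dict above is built but applied elsewhere in the package; one plausible pattern (an assumption, not the project's confirmed code) is to attach each comment as the Column description so it is carried along when the table is written out:

import numpy as np
from astropy.table import Table, Column

meta = Table()
meta['OIIFLUX'] = Column(np.zeros(3, dtype='f4'), unit='erg/s/cm2')
comments = {'OIIFLUX': '[OII] 3727 flux'}
for key, comment in comments.items():
    # Column descriptions are written as header comments when the
    # table is saved to FITS.
    meta[key].description = comment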
Code example #41
def main():

    parser = argparse.ArgumentParser(
        description="RGB predictions for Gaia EDR3 stars")
    parser.add_argument("ra_center",
                        help="right Ascension (decimal degrees)",
                        type=float)
    parser.add_argument("dec_center",
                        help="declination (decimal degrees)",
                        type=float)
    parser.add_argument("search_radius",
                        help="search radius (decimal degrees)",
                        type=float)
    parser.add_argument("g_limit",
                        help="limiting Gaia G magnitude",
                        type=float)
    parser.add_argument("--basename",
                        help="file basename for output files",
                        type=str,
                        default="rgbsearch")
    parser.add_argument(
        "--brightlimit",
        help=
        "stars brighter than this Gaia G limit are displayed with star symbols (default=8.0)",
        type=float,
        default=8.0)
    parser.add_argument(
        "--symbsize",
        help="multiplying factor for symbol size (default=1.0)",
        type=float,
        default=1.0)
    parser.add_argument("--nonumbers",
                        help="do not display star numbers in PDF chart",
                        action="store_true")
    parser.add_argument("--noplot",
                        help="skip PDF chart generation",
                        action="store_true")
    parser.add_argument("--nocolor",
                        help="do not use colors in PDF chart",
                        action="store_true")
    parser.add_argument("--starhorse_block",
                        help="number of stars/query (default=0, no query)",
                        default=0,
                        type=int)
    parser.add_argument("--verbose",
                        help="increase program verbosity",
                        action="store_true")
    parser.add_argument("--debug", help="debug flag", action="store_true")

    args = parser.parse_args()

    if len(sys.argv) == 1:
        parser.print_usage()
        raise SystemExit()

    if args.ra_center < 0 or args.ra_center > 360:
        raise SystemExit('ERROR: right ascension out of valid range')
    if args.dec_center < -90 or args.dec_center > 90:
        raise SystemExit('ERROR: declination out of valid range')
    if args.search_radius < 0:
        raise SystemExit('ERROR: search radius must be > 0 degrees')
    if args.search_radius > MAX_SEARCH_RADIUS:
        raise SystemExit(
            f'ERROR: search radius must be <= {MAX_SEARCH_RADIUS} degrees')

    # check whether the auxiliary FITS binary table exists
    if args.debug:
        auxbintable = RGB_FROM_GAIA_ALLSKY
    else:
        auxbintable = EDR3_SOURCE_ID_15M_ALLSKY
    if os.path.isfile(auxbintable):
        pass
    else:
        urldir = f'http://nartex.fis.ucm.es/~ncl/rgbphot/gaia/{auxbintable}'
        sys.stdout.write(f'Downloading {urldir}... (please wait)')
        sys.stdout.flush()
        urllib.request.urlretrieve(urldir, auxbintable)
        print(' ...OK!')

    # read the previous file
    try:
        with fits.open(auxbintable) as hdul_table:
            edr3_source_id_15M_allsky = hdul_table[1].data.source_id
            if args.debug:
                edr3_b_rgb_15M_allsky = hdul_table[1].data.B_rgb
                edr3_g_rgb_15M_allsky = hdul_table[1].data.G_rgb
                edr3_r_rgb_15M_allsky = hdul_table[1].data.R_rgb
                edr3_g_br_rgb_15M_allsky = hdul_table[1].data.G_BR_rgb
                edr3_g_gaia_15M_allsky = hdul_table[1].data.G_gaia
                edr3_bp_gaia_15M_allsky = hdul_table[1].data.BP_gaia
                edr3_rp_gaia_15M_allsky = hdul_table[1].data.RP_gaia
                edr3_av50_15M_allsky = hdul_table[1].data.av50
                edr3_met50_15M_allsky = hdul_table[1].data.met50
                edr3_dist50_15M_allsky = hdul_table[1].data.dist50
    except FileNotFoundError:
        raise SystemExit(
            f'ERROR: unexpected problem while reading {auxbintable}'
        )

    # define WCS
    naxis1 = 1024
    naxis2 = naxis1
    pixscale = 2 * args.search_radius / naxis1

    wcs_image = WCS(naxis=2)
    wcs_image.wcs.crpix = [naxis1 / 2, naxis2 / 2]
    wcs_image.wcs.crval = [args.ra_center, args.dec_center]
    wcs_image.wcs.cunit = ["deg", "deg"]
    wcs_image.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    wcs_image.wcs.cdelt = [-pixscale, pixscale]
    wcs_image.array_shape = [naxis1, naxis2]
    if args.verbose:
        print(wcs_image)

    # ---

    # EDR3 query
    query = f"""
    SELECT source_id, ra, dec,
    phot_g_mean_mag, phot_bp_mean_mag, phot_rp_mean_mag

    FROM gaiaedr3.gaia_source
    WHERE 1=CONTAINS(
      POINT('ICRS', {args.ra_center}, {args.dec_center}), 
      CIRCLE('ICRS',ra, dec, {args.search_radius}))
    AND phot_g_mean_mag IS NOT NULL 
    AND phot_bp_mean_mag IS NOT NULL 
    AND phot_rp_mean_mag IS NOT NULL
    AND phot_g_mean_mag < {args.g_limit}
    
    ORDER BY ra
    """
    sys.stdout.write(
        '<STEP1> Starting cone search in Gaia EDR3... (please wait)\n  ')
    sys.stdout.flush()
    job = Gaia.launch_job_async(query)
    r_edr3 = job.get_results()
    # compute G_BP - G_RP colour
    r_edr3.add_column(
        Column(r_edr3['phot_bp_mean_mag'] - r_edr3['phot_rp_mean_mag'],
               name='bp_rp',
               unit=u.mag))
    # colour cut in BP-RP
    mask_colour = np.logical_or((r_edr3['bp_rp'] <= -0.5),
                                (r_edr3['bp_rp'] >= 2.0))
    r_edr3_colorcut = r_edr3[mask_colour]
    nstars = len(r_edr3)
    print(f'        --> {nstars} stars found')
    nstars_colorcut = len(r_edr3_colorcut)
    print(
        f'        --> {nstars_colorcut} stars outside -0.5 < G_BP-G_RP < 2.0')
    if nstars == 0:
        raise SystemExit('ERROR: no stars found. Change search parameters!')
    if args.verbose:
        r_edr3.pprint(max_width=1000)

    # ---

    # intersection with StarHorse star sample
    if args.starhorse_block > 0:
        param_starhorse = [
            'dr3_source_id', 'sh_gaiaflag', 'sh_outflag', 'dist05', 'dist16',
            'dist50', 'dist84', 'dist95', 'av05', 'av16', 'av50', 'av84',
            'av95', 'teff16', 'teff50', 'teff84', 'logg16', 'logg50', 'logg84',
            'met16', 'met50', 'met84', 'mass16', 'mass50', 'mass84', 'xgal',
            'ygal', 'zgal', 'rgal', 'ruwe', 'angular_distance',
            'magnitude_difference', 'proper_motion_propagation',
            'dup_max_number'
        ]
        print(
            '<STEP2> Retrieving StarHorse data from Gaia@AIP... (please wait)')
        print(f'        pyvo version {pyvo.__version__}')
        print('        TAP service GAIA@AIP')
        nstars_per_block = args.starhorse_block
        nblocks = int(nstars / nstars_per_block)
        r_starhorse = None
        if nstars - nblocks * nstars_per_block > 0:
            nblocks += 1
        for iblock in range(nblocks):
            irow1 = iblock * nstars_per_block
            irow2 = min(irow1 + nstars_per_block, nstars)
            print(f'        Starting query #{iblock+1} of {nblocks}...')
            dumstr = ','.join(
                [str(item) for item in r_edr3[irow1:irow2]['source_id']])
            query = f"""
            SELECT {','.join(param_starhorse)}
            FROM gaiadr2_contrib.starhorse
            WHERE dr3_source_id IN ({dumstr})
            """
            tap_session = requests.Session()
            tap_session.headers['Authorization'] = "kkk"
            tap_service = pyvo.dal.TAPService('https://gaia.aip.de/tap',
                                              session=tap_session)
            tap_result = tap_service.run_sync(query)
            if args.debug:
                print(tap_result.to_table())
            if iblock == 0:
                r_starhorse = tap_result.to_table()
            else:
                r_starhorse = vstack(
                    [r_starhorse, tap_result.to_table()],
                    join_type='exact',
                    metadata_conflicts='silent')

        nstars_starhorse = len(r_starhorse)
        if args.verbose:
            if nstars_starhorse > 0:
                r_starhorse.pprint(max_width=1000)

        # join tables
        print(f'        --> {nstars_starhorse} stars found in StarHorse')
        print('        Joining EDR3 and StarHorse queries...')
        r_starhorse.rename_column('dr3_source_id', 'source_id')
        r_edr3 = join(r_edr3, r_starhorse, keys='source_id', join_type='outer')
        r_edr3.sort('ra')
        if args.verbose:
            r_edr3.pprint(max_width=1000)

    else:
        print('<STEP2> Retrieving StarHorse data from Gaia@AIP... (skipped!)')

    # ---

    # intersection with 15M star sample
    sys.stdout.write(
        '<STEP3> Cross-matching EDR3 with 15M subsample... (please wait)')
    sys.stdout.flush()
    set1 = set(np.array(r_edr3['source_id']))
    set2 = set(edr3_source_id_15M_allsky)
    intersection = set2.intersection(set1)
    print(f'\n        --> {len(intersection)} stars in common with 15M sample')
    if args.verbose:
        print(len(set1), len(set2), len(intersection))

    # ---

    # DR2 query to identify variable stars
    query = f"""
    SELECT source_id, ra, dec, phot_g_mean_mag, phot_variable_flag

    FROM gaiadr2.gaia_source
    WHERE  1=CONTAINS(
      POINT('ICRS', {args.ra_center}, {args.dec_center}), 
      CIRCLE('ICRS',ra, dec, {args.search_radius}))
    AND phot_g_mean_mag < {args.g_limit}
    """
    sys.stdout.write(
        '<STEP4> Looking for variable stars in Gaia DR2... (please wait)\n  ')
    sys.stdout.flush()
    job = Gaia.launch_job_async(query)
    r_dr2 = job.get_results()
    nstars_dr2 = len(r_dr2)
    if nstars_dr2 == 0:
        nvariables = 0
        mask_var = None
    else:
        if isinstance(r_dr2['phot_variable_flag'][0], bytes):
            mask_var = r_dr2['phot_variable_flag'] == b'VARIABLE'
        elif isinstance(r_dr2['phot_variable_flag'][0], str):
            mask_var = r_dr2['phot_variable_flag'] == 'VARIABLE'
        else:
            raise SystemExit(
                'Unexpected type of data in column phot_variable_flag')
        nvariables = sum(mask_var)
        print(
            f'        --> {nstars_dr2} stars in DR2 ({nvariables} initial variables)'
        )
    if nvariables > 0:
        if args.verbose:
            r_dr2[mask_var].pprint(max_width=1000)

    # ---

    # cross-match between DR2 and EDR3 to identify the variable stars
    dumstr = '('
    if nvariables > 0:
        # generate sequence of source_id of variable stars
        dumstr = ','.join([str(item) for item in r_dr2[mask_var]['source_id']])
        # cross-match
        query = f"""
        SELECT *
        FROM gaiaedr3.dr2_neighbourhood
        WHERE dr2_source_id IN ({dumstr})
        ORDER BY angular_distance
        """
        sys.stdout.write(
            '<STEP5> Cross-matching variables in DR2 with stars in EDR3... (please wait)\n  '
        )
        sys.stdout.flush()
        job = Gaia.launch_job_async(query)
        r_cross_var = job.get_results()
        if args.verbose:
            r_cross_var.pprint(max_width=1000)
        nvariables = len(r_cross_var)
        if nvariables > 0:
            # check that the variables pass the same selection as the EDR3 stars
            # (this includes the colour cut)
            mask_var = []
            for item in r_cross_var['dr3_source_id']:
                if item in r_edr3['source_id']:
                    mask_var.append(True)
                else:
                    mask_var.append(False)
            r_cross_var = r_cross_var[mask_var]
            nvariables = len(r_cross_var)
            if args.verbose:
                r_cross_var.pprint(max_width=1000)
        else:
            r_cross_var = None
    else:
        r_cross_var = None  # Avoid PyCharm warning
    print(f'        --> {nvariables} variable(s) in selected EDR3 star sample')

    # ---

    sys.stdout.write('<STEP6> Computing RGB magnitudes...')
    sys.stdout.flush()
    # predict RGB magnitudes
    coef_B = np.array([
        -0.13748689, 0.44265552, 0.37878846, -0.14923841, 0.09172474,
        -0.02594726
    ])
    coef_G = np.array([
        -0.02330159, 0.12884074, 0.22149167, -0.1455048, 0.10635149, -0.0236399
    ])
    coef_R = np.array([
        0.10979647, -0.14579334, 0.10747392, -0.1063592, 0.08494556,
        -0.01368962
    ])
    coef_X = np.array([
        -0.01252185, 0.13983574, 0.23688188, -0.10175532, 0.07401939,
        -0.0182115
    ])

    poly_B = Polynomial(coef_B)
    poly_G = Polynomial(coef_G)
    poly_R = Polynomial(coef_R)
    poly_X = Polynomial(coef_X)

    r_edr3.add_column(Column(np.round(
        r_edr3['phot_g_mean_mag'] + poly_B(r_edr3['bp_rp']), 2),
                             name='b_rgb',
                             unit=u.mag,
                             format='.2f'),
                      index=3)
    r_edr3.add_column(Column(np.round(
        r_edr3['phot_g_mean_mag'] + poly_G(r_edr3['bp_rp']), 2),
                             name='g_rgb',
                             unit=u.mag,
                             format='.2f'),
                      index=4)
    r_edr3.add_column(Column(np.round(
        r_edr3['phot_g_mean_mag'] + poly_R(r_edr3['bp_rp']), 2),
                             name='r_rgb',
                             unit=u.mag,
                             format='.2f'),
                      index=5)
    r_edr3.add_column(Column(np.round(
        r_edr3['phot_g_mean_mag'] + poly_X(r_edr3['bp_rp']), 2),
                             name='g_br_rgb',
                             unit=u.mag,
                             format='.2f'),
                      index=6)
    print('OK')
    if args.verbose:
        r_edr3.pprint(max_width=1000)

    # ---

    sys.stdout.write('<STEP7> Saving output CSV files...')
    sys.stdout.flush()
    outtypes = ['edr3', '15m', 'var']
    outtypes_color = {'edr3': 'black', '15m': 'red', 'var': 'blue'}
    r_edr3.add_column(
        Column(np.zeros(len(r_edr3)), name='number_csv', dtype=int))
    for item in outtypes:
        r_edr3.add_column(
            Column(np.zeros(len(r_edr3)), name=f'number_{item}', dtype=int))
    outlist = [f'./{args.basename}_{ftype}.csv' for ftype in outtypes]
    filelist = glob.glob('./*.csv')
    # remove previous versions of the output files (if present)
    for file in outlist:
        if file in filelist:
            try:
                os.remove(file)
            except OSError:
                print(f'ERROR: while deleting existing file {file}')
    # columns to be saved (use a list to guarantee the same order)
    outcolumns_list = [
        'source_id', 'ra', 'dec', 'b_rgb', 'g_rgb', 'r_rgb', 'g_br_rgb',
        'phot_g_mean_mag', 'phot_bp_mean_mag', 'phot_rp_mean_mag'
    ]
    # define column format with a dictionary
    outcolumns = {
        'source_id': '19d',
        'ra': '14.9f',
        'dec': '14.9f',
        'b_rgb': '6.2f',
        'g_rgb': '6.2f',
        'r_rgb': '6.2f',
        'g_br_rgb': '6.2f',
        'phot_g_mean_mag': '8.4f',
        'phot_bp_mean_mag': '8.4f',
        'phot_rp_mean_mag': '8.4f'
    }
    if set(outcolumns_list) != set(outcolumns.keys()):
        raise SystemExit('ERROR: check outcolumns_list and outcolumns')
    csv_header_ini = 'number,' + ','.join(outcolumns_list)
    flist = []
    for ftype in outtypes:
        f = open(f'{args.basename}_{ftype}.csv', 'wt')
        flist.append(f)
        if (args.starhorse_block > 0) and (ftype in ['edr3', '15m']):
            if args.debug and (ftype == '15m'):
                csv_header = csv_header_ini + \
                             ',av50,met50,dist50,b_rgb_bis,g_rgb_bis,r_rgb_bis,g_br_rgb_bis,' \
                             'phot_g_mean_mag_bis,phot_bp_mean_mag_bis,phot_rp_mean_mag_bis,' \
                             'av50_bis,met50_bis,dist50_bis'
            else:
                csv_header = csv_header_ini + ',av50,met50,dist50'
        else:
            csv_header = csv_header_ini
        f.write(csv_header + '\n')
    # save each star in its corresponding output file
    krow = np.ones(len(outtypes), dtype=int)
    for irow, row in enumerate(r_edr3):
        cout = []
        for item in outcolumns_list:
            cout.append(format(row[item], outcolumns[item]))
        iout = 0
        if nvariables > 0:
            if row['source_id'] in r_cross_var['dr3_source_id']:
                iout = 2
        if iout == 0:
            if args.starhorse_block > 0:
                for item in ['av50', 'met50', 'dist50']:
                    value = row[item]
                    if isinstance(value, float):
                        pass
                    else:
                        value = 99.999
                    cout.append(f'{value:7.3f}')
            if row['source_id'] in intersection:
                iout = 1
                if args.debug:
                    iloc = np.argwhere(
                        edr3_source_id_15M_allsky == row['source_id'])[0][0]
                    cout.append(f"{edr3_b_rgb_15M_allsky[iloc]:6.2f}")
                    cout.append(f"{edr3_g_rgb_15M_allsky[iloc]:6.2f}")
                    cout.append(f"{edr3_r_rgb_15M_allsky[iloc]:6.2f}")
                    cout.append(f"{edr3_g_br_rgb_15M_allsky[iloc]:6.2f}")
                    cout.append(f"{edr3_g_gaia_15M_allsky[iloc]:8.4f}")
                    cout.append(f"{edr3_bp_gaia_15M_allsky[iloc]:8.4f}")
                    cout.append(f"{edr3_rp_gaia_15M_allsky[iloc]:8.4f}")
                    cout.append(f"{edr3_av50_15M_allsky[iloc]:7.3f}")
                    cout.append(f"{edr3_met50_15M_allsky[iloc]:7.3f}")
                    cout.append(f"{edr3_dist50_15M_allsky[iloc]:7.3f}")
        flist[iout].write(f'{krow[iout]:6d}, ' + ','.join(cout) + '\n')
        r_edr3[irow]['number_csv'] = iout
        r_edr3[irow][f'number_{outtypes[iout]}'] = krow[iout]
        krow[iout] += 1
    for f in flist:
        f.close()
    print('OK')

    if args.verbose:
        print(r_edr3)

    if args.noplot:
        raise SystemExit()

    # ---

    sys.stdout.write('<STEP8> Generating PDF plot...')
    sys.stdout.flush()
    # generate plot
    r_edr3.sort('phot_g_mean_mag')
    if args.verbose:
        print('')
        r_edr3.pprint(max_width=1000)

    symbol_size = args.symbsize * (50 /
                                   np.array(r_edr3['phot_g_mean_mag']))**2.5
    ra_array = np.array(r_edr3['ra'])
    dec_array = np.array(r_edr3['dec'])

    c = SkyCoord(ra=ra_array * u.degree,
                 dec=dec_array * u.degree,
                 frame='icrs')
    x_pix, y_pix = wcs_image.world_to_pixel(c)

    fig = plt.figure(figsize=(13, 10))
    ax = plt.subplot(projection=wcs_image)
    iok = r_edr3['phot_g_mean_mag'] < args.brightlimit
    if args.nocolor:
        sc = ax.scatter(x_pix[iok],
                        y_pix[iok],
                        marker='*',
                        color='grey',
                        edgecolors='black',
                        linewidth=0.2,
                        s=symbol_size[iok])
        ax.scatter(x_pix[~iok],
                   y_pix[~iok],
                   marker='.',
                   color='grey',
                   edgecolors='black',
                   linewidth=0.2,
                   s=symbol_size[~iok])
    else:
        cmap = plt.cm.get_cmap('jet')
        sc = ax.scatter(x_pix[iok],
                        y_pix[iok],
                        marker='*',
                        edgecolors='black',
                        linewidth=0.2,
                        s=symbol_size[iok],
                        cmap=cmap,
                        c=r_edr3[iok]['bp_rp'],
                        vmin=-0.5,
                        vmax=2.0)
        ax.scatter(x_pix[~iok],
                   y_pix[~iok],
                   marker='.',
                   edgecolors='black',
                   linewidth=0.2,
                   s=symbol_size[~iok],
                   cmap=cmap,
                   c=r_edr3[~iok]['bp_rp'],
                   vmin=-0.5,
                   vmax=2.0)

    # display numbers if requested
    if not args.nonumbers:
        for irow in range(len(r_edr3)):
            number_csv = r_edr3[irow]['number_csv']
            text = r_edr3[irow][f'number_{outtypes[number_csv]}']
            ax.text(x_pix[irow],
                    y_pix[irow],
                    text,
                    color=outtypes_color[outtypes[number_csv]],
                    fontsize='5',
                    horizontalalignment='left',
                    verticalalignment='bottom')

    # stars outside the -0.5 < G_BP - G_RP < 2.0 colour cut
    if nstars_colorcut > 0:
        mask_colour = np.logical_or((r_edr3['bp_rp'] <= -0.5),
                                    (r_edr3['bp_rp'] >= 2.0))
        iok = np.argwhere(mask_colour)
        ax.scatter(x_pix[iok],
                   y_pix[iok],
                   s=240,
                   marker='D',
                   facecolors='none',
                   edgecolors='grey',
                   linewidth=0.5)

    # variable stars
    if nvariables > 0:
        sorter = np.argsort(r_edr3['source_id'])
        iok = np.array(sorter[np.searchsorted(r_edr3['source_id'],
                                              r_cross_var['dr3_source_id'],
                                              sorter=sorter)])
        ax.scatter(x_pix[iok],
                   y_pix[iok],
                   s=240,
                   marker='s',
                   facecolors='none',
                   edgecolors='blue',
                   linewidth=0.5)

    # stars in 15M sample
    if len(intersection) > 0:
        sorter = np.argsort(r_edr3['source_id'])
        iok = np.array(sorter[np.searchsorted(r_edr3['source_id'],
                                              np.array(list(intersection)),
                                              sorter=sorter)])
        ax.scatter(x_pix[iok],
                   y_pix[iok],
                   s=240,
                   marker='o',
                   facecolors='none',
                   edgecolors=outtypes_color['15m'],
                   linewidth=0.5)

    ax.scatter(0.03,
               0.96,
               s=240,
               marker='o',
               facecolors='white',
               edgecolors=outtypes_color['15m'],
               linewidth=0.5,
               transform=ax.transAxes)
    ax.text(0.06,
            0.96,
            'star in 15M sample',
            fontsize=12,
            backgroundcolor='white',
            horizontalalignment='left',
            verticalalignment='center',
            transform=ax.transAxes)

    ax.scatter(0.03,
               0.92,
               s=240,
               marker='s',
               facecolors='white',
               edgecolors=outtypes_color['var'],
               linewidth=0.5,
               transform=ax.transAxes)
    ax.text(0.06,
            0.92,
            'variable in Gaia DR2',
            fontsize=12,
            backgroundcolor='white',
            horizontalalignment='left',
            verticalalignment='center',
            transform=ax.transAxes)

    ax.scatter(0.03,
               0.88,
               s=240,
               marker='D',
               facecolors='white',
               edgecolors='grey',
               linewidth=0.5,
               transform=ax.transAxes)
    ax.text(0.06,
            0.88,
            'outside colour range',
            fontsize=12,
            backgroundcolor='white',
            horizontalalignment='left',
            verticalalignment='center',
            transform=ax.transAxes)

    ax.set_xlabel('ra')
    ax.set_ylabel('dec')

    ax.set_aspect('equal')

    if not args.nocolor:
        cbaxes = fig.add_axes([0.683, 0.81, 0.15, 0.02])
        cbar = plt.colorbar(sc,
                            cax=cbaxes,
                            orientation='horizontal',
                            format='%1.0f')
        cbar.ax.tick_params(labelsize=12)
        cbar.set_label(label=r'$G_{\rm BP}-G_{\rm RP}$',
                       size=12,
                       backgroundcolor='white')

    ax.text(0.98,
            0.96,
            f'Field radius: {args.search_radius:.4f} degree',
            fontsize=12,
            backgroundcolor='white',
            horizontalalignment='right',
            verticalalignment='center',
            transform=ax.transAxes)
    ax.text(0.02,
            0.06,
            r'$\alpha_{\rm center}$:',
            fontsize=12,
            backgroundcolor='white',
            horizontalalignment='left',
            verticalalignment='bottom',
            transform=ax.transAxes)
    ax.text(0.25,
            0.06,
            f'{args.ra_center:.4f} degree',
            fontsize=12,
            backgroundcolor='white',
            horizontalalignment='right',
            verticalalignment='bottom',
            transform=ax.transAxes)
    ax.text(0.02,
            0.02,
            r'$\delta_{\rm center}$:',
            fontsize=12,
            backgroundcolor='white',
            horizontalalignment='left',
            verticalalignment='bottom',
            transform=ax.transAxes)
    ax.text(0.25,
            0.02,
            f'{args.dec_center:+.4f} degree',
            fontsize=12,
            backgroundcolor='white',
            horizontalalignment='right',
            verticalalignment='bottom',
            transform=ax.transAxes)
    ax.text(0.98,
            0.02,
            f'RGBfromGaiaEDR3, version {VERSION}',
            fontsize=12,
            backgroundcolor='white',
            horizontalalignment='right',
            verticalalignment='bottom',
            transform=ax.transAxes)

    f = np.pi / 180
    xp = naxis1 / 2 + args.search_radius / pixscale * np.cos(
        np.arange(361) * f)
    yp = naxis2 / 2 + args.search_radius / pixscale * np.sin(
        np.arange(361) * f)
    ax.plot(xp, yp, '-', color='orange', linewidth=0.5, alpha=0.5)

    ax.set_xlim([-naxis1 * 0.12, naxis1 * 1.12])
    ax.set_ylim([-naxis2 * 0.05, naxis2 * 1.05])

    ax.set_axisbelow(True)
    overlay = ax.get_coords_overlay('icrs')
    overlay.grid(color='black', ls='dotted')

    plt.savefig(f'{args.basename}.pdf')
    plt.close(fig)
    if args.verbose:
        pass
    else:
        print('OK')
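
A minimal sketch of the STEP6 pattern above: evaluate a colour-dependent polynomial and insert the result as a formatted Column at a fixed index (the coefficients are copied from the example; the two-row table is made up):

import numpy as np
import astropy.units as u
from astropy.table import Table, Column
from numpy.polynomial import Polynomial

t = Table()
t['phot_g_mean_mag'] = [10.0, 11.5]
t['bp_rp'] = [0.5, 1.2]
poly_B = Polynomial([-0.13748689, 0.44265552, 0.37878846,
                     -0.14923841, 0.09172474, -0.02594726])
# Round to 2 decimals and place the new column right after bp_rp.
t.add_column(Column(np.round(t['phot_g_mean_mag'] + poly_B(t['bp_rp']), 2),
                    name='b_rgb', unit=u.mag, format='.2f'),
             index=2)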
Code example #42
File: templates.py Project: akremin/desisim
    def make_templates(self, vrad_meansig=(0.0,200.0), rmagrange=(18.0,23.4),
                       gmagrange=(16.0,19.0)):
        """Build Monte Carlo set of spectra/templates for stars. 

        This function chooses random subsets of the continuum spectra for stars,
        adds radial velocity "jitter", then normalizes the spectrum to a
        specified r- or g-band magnitude.

        Args:
          vrad_meansig (float, optional): Mean and sigma (standard deviation) of the 
            radial velocity "jitter" (in km/s) that should be added to each
            spectrum.  Defaults to a normal distribution with a mean of zero and
            sigma of 200 km/s.
          rmagrange (float, optional): Minimum and maximum DECam r-band (AB)
            magnitude range.  Defaults to a uniform distribution between (18,23.4).
          gmagrange (float, optional): Minimum and maximum DECam g-band (AB)
            magnitude range.  Defaults to a uniform distribution between (16,19). 

        Returns:
          outflux (numpy.ndarray): Array [nmodel,npix] of observed-frame spectra [erg/s/cm2/A]. 
          meta (astropy.Table): Table of meta-data for each output spectrum [nmodel].

        Raises:

        """
        from astropy.table import Table, Column
        from desisim.io import write_templates
        from desispec.interpolation import resample_flux

        # Initialize the output flux array and metadata Table.
        outflux = np.zeros([self.nmodel,len(self.wave)]) # [erg/s/cm2/A]

        meta = Table()
        meta['TEMPLATEID'] = Column(np.zeros(self.nmodel,dtype='i4'))
        meta['REDSHIFT'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['GMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['RMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['ZMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['LOGG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['TEFF'] = Column(np.zeros(self.nmodel,dtype='f4'))

        meta['LOGG'].unit = 'm/s^2'
        meta['TEFF'].unit = 'K'

        if self.objtype=='WD':
            comments = dict(
                TEMPLATEID = 'template ID',
                REDSHIFT = 'object redshift',
                GMAG = 'DECam g-band AB magnitude',
                RMAG = 'DECam r-band AB magnitude',
                ZMAG = 'DECam z-band AB magnitude',
                LOGG = 'log10 of the effective gravity',
                TEFF = 'stellar effective temperature'
            )
        else:
            meta['FEH'] = Column(np.zeros(self.nmodel,dtype='f4'))
            comments = dict(
                TEMPLATEID = 'template ID',
                REDSHIFT = 'object redshift',
                GMAG = 'DECam g-band AB magnitude',
                RMAG = 'DECam r-band AB magnitude',
                ZMAG = 'DECam z-band AB magnitude',
                LOGG = 'log10 of the effective gravity',
                TEFF = 'stellar effective temperature',
                FEH = 'log10 iron abundance relative to solar',
            )


        nobj = 0
        nbase = len(self.basemeta)
        nchunk = min(self.nmodel,500)

        Cuts = TargetCuts()
        while nobj<=(self.nmodel-1):
            # Choose a random subset of the base templates
            chunkindx = self.rand.randint(0,nbase-1,nchunk)

            # Assign uniform redshift and r-magnitude distributions.
            if self.objtype=='WD':
                gmag = self.rand.uniform(gmagrange[0],gmagrange[1],nchunk)
            else: 
                rmag = self.rand.uniform(rmagrange[0],rmagrange[1],nchunk)
                
            vrad = self.rand.normal(vrad_meansig[0],vrad_meansig[1],nchunk)
            redshift = vrad/2.99792458E5

            # Unfortunately we have to loop here.
            for ii, iobj in enumerate(chunkindx):
                zwave = self.basewave*(1.0+redshift[ii])
                restflux = self.baseflux[iobj,:] # [erg/s/cm2/A @10pc]

                # Normalize; Note that [grz]flux are in nanomaggies
                if self.objtype=='WD':
                    gnorm = 10.0**(-0.4*gmag[ii])/self.gfilt.get_maggies(zwave,restflux)
                    flux = restflux*gnorm # [erg/s/cm2/A, @redshift[ii]]

                    gflux = 10.0**(-0.4*(gmag[ii]-22.5))                      
                    rflux = self.rfilt.get_maggies(zwave,flux)*10**(0.4*22.5) 
                    zflux = self.zfilt.get_maggies(zwave,flux)*10**(0.4*22.5)
                else:
                    rnorm = 10.0**(-0.4*rmag[ii])/self.rfilt.get_maggies(zwave,restflux)
                    flux = restflux*rnorm # [erg/s/cm2/A, @redshift[ii]]

                    rflux = 10.0**(-0.4*(rmag[ii]-22.5))                      
                    gflux = self.gfilt.get_maggies(zwave,flux)*10**(0.4*22.5) 
                    zflux = self.zfilt.get_maggies(zwave,flux)*10**(0.4*22.5)

                # Color cuts on just on the standard stars.
                if self.objtype=='FSTD':
                    grzmask = [Cuts.FSTD(gflux=gflux,rflux=rflux,zflux=zflux)]
                elif self.objtype=='WD':
                    grzmask = [True]
                else:
                    grzmask = [True]

                if all(grzmask):
                    if ((nobj+1)%10)==0:
                        print('Simulating {} template {}/{}'.format(self.objtype,nobj+1,self.nmodel))
                    outflux[nobj,:] = resample_flux(self.wave,zwave,flux)

                    if self.objtype=='WD':
                        meta['TEMPLATEID'][nobj] = nobj
                        meta['REDSHIFT'][nobj] = redshift[ii]
                        meta['GMAG'][nobj] = gmag[ii]
                        meta['RMAG'][nobj] = -2.5*np.log10(rflux)+22.5
                        meta['ZMAG'][nobj] = -2.5*np.log10(zflux)+22.5
                        meta['LOGG'][nobj] = self.basemeta['LOGG'][iobj]
                        meta['TEFF'][nobj] = self.basemeta['TEFF'][iobj]
                    else:
                        meta['TEMPLATEID'][nobj] = nobj
                        meta['REDSHIFT'][nobj] = redshift[ii]
                        meta['GMAG'][nobj] = -2.5*np.log10(gflux)+22.5
                        meta['RMAG'][nobj] = rmag[ii]
                        meta['ZMAG'][nobj] = -2.5*np.log10(zflux)+22.5
                        meta['LOGG'][nobj] = self.basemeta['LOGG'][iobj]
                        meta['TEFF'][nobj] = self.basemeta['TEFF'][iobj]
                        meta['FEH'][nobj] = self.basemeta['FEH'][iobj]

                    nobj = nobj+1

                # If we have enough models get out!
                if nobj>=(self.nmodel-1):
                    break
                
        return outflux, self.wave, meta
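
A small sketch of the radial-velocity "jitter" used above, mapped to redshift with the non-relativistic approximation (the RNG seed is arbitrary):

import numpy as np

rng = np.random.default_rng(0)
vrad = rng.normal(0.0, 200.0, 5)     # km/s
redshift = vrad / 2.99792458e5       # z = v/c for v << c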
Code example #43
def SGroupwrite(outfile, SuperList, GroupList):

    NoSGroups = len(SuperList)

    myTable = Table()

    empty = []
    myTable.add_column(Column(data=empty, name='pgc', dtype=np.dtype(int)))
    myTable.add_column(Column(data=empty, name='flag', dtype=np.dtype(int)))
    myTable.add_column(Column(data=empty, name='ra', format='%0.4f'))
    myTable.add_column(
        Column(data=empty, name='dec', format='%0.4f', length=10))
    myTable.add_column(Column(data=empty, name='gl', format='%0.4f'))
    myTable.add_column(Column(data=empty, name='gb', format='%0.4f',
                              length=10))
    myTable.add_column(Column(data=empty, name='sgl', format='%0.4f'))
    myTable.add_column(
        Column(data=empty, name='sgb', format='%0.4f', length=10))

    myTable.add_column(Column(data=empty, name='Ks', format='%0.2f'))
    myTable.add_column(Column(data=empty, name='logK', format='%0.4f'))
    myTable.add_column(Column(data=empty, name='Vls', format='%0.0f'))
    myTable.add_column(Column(data=empty, name='dist', format='%0.2f'))
    myTable.add_column(Column(data=empty, name='mDist', format='%0.2f'))
    myTable.add_column(Column(data=empty, name='mDistErr', format='%0.2f'))

    myTable.add_column(Column(data=empty, name='sigmaP_dyn', format='%0.1f'))
    myTable.add_column(Column(data=empty, name='sigmaP_lum', format='%0.1f'))

    myTable.add_column(Column(data=empty, name='Mv_lum', format='%1.2e'))
    myTable.add_column(Column(data=empty, name='R2t_lum', format='%0.3f'))
    myTable.add_column(Column(data=empty, name='r1t_lum', format='%0.3f'))
    myTable.add_column(Column(data=empty, name='tX_lum', format='%1.2e'))

    myTable.add_column(
        Column(data=empty, name='No_Galaxies', dtype=np.dtype(int)))
    myTable.add_column(Column(data=empty, name='nest', dtype=np.dtype(int)))

    for i in range(0, NoSGroups):  # for all groups
        meanDist = 0.
        meanDistErr = 0.
        sumDist = 0
        sumError = 0
        for object in SuperList[i][1]:
            if object.mDist != 0 and object.mDistErr != 0:
                err = object.mDist * object.mDistErr
                sumDist += object.ngal * object.mDist / (err**2)
                sumError += 1.0 * object.ngal / (err**2)

        if sumDist != 0 and sumError != 0:
            meanDist = sumDist / sumError
            meanDistErr = sqrt(1. / sumError) / meanDist

        SuperList[i][0].mDist = meanDist
        SuperList[i][0].mDistErr = meanDistErr

        table_row(myTable, SuperList[i][0])
        for Group in SuperList[i][1]:
            table_row(myTable, Group)
    for Group in GroupList:
        if Group.flag <= 2:
            table_row(myTable, Group)

    pgc = 999999999
    ra = 999.9999
    dec = -99.99
    gl = ra
    gb = dec
    sgl = ra
    sgb = dec
    Ty = -100000.00
    B_mag = Ty
    Ks = 99.99
    logK = 99.9999
    Vls = 9999
    dcf2 = 99.99
    ed = 9.99
    Mv_dyn = 9.99E99
    Mv_lum = Mv_dyn
    tX_dyn = Mv_lum
    tX_lum = Mv_lum
    nest = 9999999

    flag = 0
    mDist = 0
    mDistErr = 0
    dist = 0
    sigmaP_dyn = 0
    sigmaP_lum = 0
    R2t_lum = 0
    r1t_lum = 0
    subGalaxies = 0

    myTable.add_row([pgc,flag,ra,dec,gl, gb, sgl,sgb,Ks,logK,Vls, dist, \
         mDist, mDistErr, sigmaP_dyn, sigmaP_lum, \
            Mv_lum, R2t_lum, r1t_lum, tX_lum, subGalaxies, nest])

    myTable.write(outfile,
                  format='ascii.fixed_width',
                  delimiter='|',
                  bookend=False)

    ### remove the last line (it exists only to adjust the column widths)
    command = ["csh", "remove_lastline.csh", outfile]
    subprocess.call(command)
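
A self-contained sketch of the output format used above: a '|'-delimited, fixed-width ASCII table without bookend characters (the file name is hypothetical):

from astropy.table import Table, Column

t = Table()
t.add_column(Column(data=[999999999], name='pgc', dtype=int))
t.add_column(Column(data=[999.9999], name='ra', format='%0.4f'))
# Writes aligned columns separated by '|' with no leading/trailing bars.
t.write('sgroups.txt', format='ascii.fixed_width',
        delimiter='|', bookend=False)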
Code example #44
File: tableio.py Project: nudomarinero/LSMTool
def coneSearch(VOService, position, radius):
    """
    Returns table from a VO cone search.

    Parameters
    ----------
    VOService : str
        Name of VO service to query (must be one of 'WENSS' or 'NVSS')
    position : list of floats
        A list specifying a new position as [RA, Dec] in either makesourcedb
        format (e.g., ['12:23:43.21', '+22.34.21.2']) or in degrees (e.g.,
        [123.2312, 23.3422])
    radius : float or str
        Radius in degrees (if float) or 'value unit' (if str; e.g.,
        '30 arcsec') for the cone-search region
    """
    import pyvo as vo

    log = logging.getLogger('LSMTool.Load')

    # Define allowed cone-search databases. These are the ones we know how to
    # convert to makesourcedb-formatted sky models.
    columnMapping = {
        'nvss':{'NVSS':'name', 'RAJ2000':'ra', 'DEJ2000':'dec', 'S1.4':'i',
            'MajAxis':'majoraxis', 'MinAxis':'minoraxis', 'referencefrequency':1.4e9},
        'wenss':{'Name':'name', 'RAJ2000':'ra', 'DEJ2000':'dec', 'Sint':'i',
            'MajAxis':'majoraxis', 'MinAxis':'minoraxis', 'PA':'orientation',
            'referencefrequency':325e6}
        }

    if VOService.lower() in allowedVOServices:
        url = allowedVOServices[VOService.lower()]
    else:
        raise ValueError('VO query service not known. Allowed services are: '
            '{0}'.format(allowedVOServices.keys()))

    # Get raw VO catalog
    log.debug('Querying VO service...')
    try:
        position = [RA2Angle(position[0])[0].value, Dec2Angle(position[1])[0].value]
    except TypeError:
        raise ValueError('VO query position not understood.')
    try:
        radius = Angle(radius, unit='degree').value
    except TypeError:
        raise ValueError('VO query radius not understood.')
    VOcatalog = vo.conesearch(url, position, radius=radius)

    log.debug('Creating table...')
    try:
        table = Table.read(VOcatalog.votable)
    except IndexError:
        # Empty query result
        log.error('No sources found. Sky model is empty.')
        table = makeEmptyTable()
        return table

    # Remove unneeded columns
    colsToRemove = []
    for colName in table.colnames:
        if colName not in columnMapping[VOService.lower()]:
            colsToRemove.append(colName)
        elif columnMapping[VOService.lower()][colName] not in allowedColumnNames:
            colsToRemove.append(colName)
    for colName in colsToRemove:
        table.remove_column(colName)

    # Rename columns to match makesourcedb conventions
    for colName in table.colnames:
        if colName != allowedColumnNames[columnMapping[VOService.lower()][colName]]:
            table.rename_column(colName, allowedColumnNames[columnMapping[
                VOService.lower()][colName]])

    # Convert RA and Dec to Angle objects
    log.debug('Converting RA...')
    RARaw = table['Ra'].data.tolist()
    RACol = Column(name='Ra', data=RA2Angle(RARaw))
    def raformat(val):
        return Angle(val, unit='degree').to_string(unit='hourangle', sep=':')
    RACol.format = raformat
    RAIndx = table.keys().index('Ra')
    table.remove_column('Ra')
    table.add_column(RACol, index=RAIndx)

    log.debug('Converting Dec...')
    DecRaw = table['Dec'].data.tolist()
    DecCol = Column(name='Dec', data=Dec2Angle(DecRaw))
    def decformat(val):
        return Angle(val, unit='degree').to_string(unit='degree', sep='.')
    DecCol.format = decformat
    DecIndx = table.keys().index('Dec')
    table.remove_column('Dec')
    table.add_column(DecCol, index=DecIndx)

    # Make sure Name is a str column
    NameRaw = table['Name'].data.tolist()
    NameCol = Column(name='Name', data=NameRaw, dtype='{}100'.format(numpy_type))
    table.remove_column('Name')
    table.add_column(NameCol, index=0)

    # Convert flux and axis values to floats
    for name in ['I', 'MajorAxis', 'MinorAxis', 'Orientation']:
        if name in table.colnames:
            indx = table.index_column(name)
            intRaw = table[name].data.tolist()
            floatCol = Column(name=name, data=intRaw, dtype='float')
            table.remove_column(name)
            table.add_column(floatCol, index=indx)


    # Add source-type column
    types = ['POINT'] * len(table)
    if 'majoraxis' in columnMapping[VOService.lower()].values():
        for i, maj in enumerate(table[allowedColumnNames['majoraxis']]):
            if maj > 0.0:
                types[i] = 'GAUSSIAN'
    col = Column(name='Type', data=types, dtype='{}100'.format(numpy_type))
    table.add_column(col, index=1)

    # Add reference-frequency column
    refFreq = columnMapping[VOService.lower()]['referencefrequency']
    col = Column(name='ReferenceFrequency', data=np.array([refFreq]*len(table), dtype=float))
    table.add_column(col)

    # Set column units and default values
    def fluxformat(val):
        return '{0:0.3f}'.format(val)
    for i, colName in enumerate(table.colnames):
        log.debug("Setting units for column '{0}' to {1}".format(
            colName, allowedColumnUnits[colName.lower()]))
        if colName == 'I':
            table.columns[colName].unit = 'mJy'
            table.columns[colName].convert_unit_to('Jy')
            table.columns[colName].format = fluxformat
        else:
            table.columns[colName].unit = allowedColumnUnits[colName.lower()]

        if hasattr(table.columns[colName], 'filled') and allowedColumnDefaults[colName.lower()] is not None:
            fillVal = allowedColumnDefaults[colName.lower()]
            if colName == 'SpectralIndex':
                while len(fillVal) < 1:
                    fillVal.append(0.0)
            log.debug("Setting default value for column '{0}' to {1}".
                format(colName, fillVal))
            table.columns[colName].fill_value = fillVal

    return table
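
A minimal sketch of the in-place unit conversion used near the end of coneSearch (the values are made up):

from astropy.table import Table, Column

t = Table([Column([1200.0, 850.0], name='I')])
t['I'].unit = 'mJy'
t['I'].convert_unit_to('Jy')   # data become [1.2, 0.85], unit 'Jy'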
Code example #45
def table():
    return Table([
        Column([1, 2], 'a'),
        Column([1, 2] * u.m, 'b'),
        Column(['x', 'yy'], 'c'),
    ])