Code Example #1
File: pz.py  Project: cpadavis/destest
  def calc_bootstrap_sig8(test,dir,param,notomo=False,num_bootstrap=50):
    """
    Calculate bootstrap for cosmological parameters in spec validation tests.
    """

    from astropy.table import Table
    import numpy as np

    mean=[]
    for i in range(num_bootstrap):
      if notomo:
        mean0=Table.read(dir+test+'/out/sim_data_notomo_skynet_'+str(i)+'/means.txt', format='ascii')
      else:
        mean0=Table.read(dir+test+'/out/sim_data_skynet_'+str(i)+'/means.txt', format='ascii')

      for row in mean0:
        if param in row['parameter']:
          mean=np.append(mean,row['mean'])
    print(mean)

    # TODO [cpd]: Check if (N-1) / N or N / (N - 1)
    var=np.mean((mean-np.mean(mean))*(mean-np.mean(mean)))*(num_bootstrap - 1.) / num_bootstrap

    print(var)

    return np.sqrt(var)
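The returned error is simply the spread of the per-realization bootstrap means. A minimal self-contained sketch of the same variance line, using made-up numbers rather than project data (the (N - 1)/N factor mirrors the TODO above):

import numpy as np

# Illustrative bootstrap means for a single parameter (not project data)
mean = np.array([0.81, 0.79, 0.83, 0.80, 0.82])
num_bootstrap = len(mean)

# Same estimator as calc_bootstrap_sig8: variance of the bootstrap means,
# rescaled by (N - 1) / N; the reported error is its square root.
var = np.mean((mean - np.mean(mean)) ** 2) * (num_bootstrap - 1.) / num_bootstrap
print(np.sqrt(var))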
Code Example #2
File: FSM.py  Project: nickkonidaris/rcrc
 def execute(self, inputs):
     
     now = datetime.now()
     ns = self.statetable[self.next_state_name]
     
     log.info("Executing %s" % self.next_state_name)
     self.prev_state_name = self.next_state_name
     self.next_state_name = ns.execute(self.prev_state_name, inputs)
     
     if self.prev_state_name != self.next_state_name:
         log.info("Transitioning %s->%s" % (self.prev_state_name, 
             self.next_state_name))
     
     elapsed = datetime.now() - now
     ns.elapsed += elapsed
     ns.n_times += 1
     
     dt = datetime.now()
     fn = "%4.4i_%2.2i_%2.2i.txt" % (dt.year, dt.month, dt.day)
     names = []
     times = []
     elapsed = []
     for statename, state in self.statetable.items():
         names.append(statename)
         times.append(state.n_times)
         elapsed.append(state.elapsed.seconds)
     
     table = Table([names, times, elapsed], 
         names=("State", "# times", "# sec"))
     table.write("s:/logs/states/%s" % fn, format="ascii.fixed_width_two_line")
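The per-state bookkeeping ends with the Table([...], names=...) constructor and the fixed-width ASCII writer. A minimal standalone sketch of that pattern (the state names, counts, and output file name are made up):

from astropy.table import Table

names = ["idle", "expose", "readout"]    # illustrative state names
times = [10, 4, 4]                       # how often each state ran
elapsed = [120, 300, 45]                 # seconds spent in each state

table = Table([names, times, elapsed], names=("State", "# times", "# sec"))
table.write("states_example.txt", format="ascii.fixed_width_two_line")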
Code Example #3
    def test_add_halocat_to_cache5(self):
        halocat = UserSuppliedHaloCatalog(Lbox = 200, 
            particle_mass = 100, redshift = self.redshift, 
            **self.good_halocat_args)

        basename = 'abc.hdf5'
        fname = os.path.join(self.dummy_cache_baseloc, basename)
        _t = Table({'x': [0]})
        _t.write(fname, format='ascii')
        assert os.path.isfile(fname)

        dummy_string = '  '
        class Dummy(object):
            pass
            
            def __str__(self):
                raise TypeError
        not_representable_as_string = Dummy()

        with pytest.raises(HalotoolsError) as err:
            halocat.add_halocat_to_cache(
                fname, dummy_string, dummy_string, dummy_string, dummy_string, 
                overwrite = True, some_more_metadata = not_representable_as_string)
        substr = "keyword is not representable as a string."
        assert substr in err.value.args[0]
Code Example #4
File: fermi.py  Project: adonath/gammapy
    def __init__(self, filename="$GAMMAPY_DATA/catalogs/fermi/gll_psc_v16.fit.gz"):
        filename = str(make_path(filename))

        with warnings.catch_warnings():  # ignore FITS units warnings
            warnings.simplefilter("ignore", u.UnitsWarning)
            table = Table.read(filename, hdu="LAT_Point_Source_Catalog")

        table_standardise_units_inplace(table)

        source_name_key = "Source_Name"
        source_name_alias = (
            "Extended_Source_Name",
            "0FGL_Name",
            "1FGL_Name",
            "2FGL_Name",
            "1FHL_Name",
            "ASSOC_TEV",
            "ASSOC1",
            "ASSOC2",
        )
        super().__init__(
            table=table,
            source_name_key=source_name_key,
            source_name_alias=source_name_alias,
        )

        self.extended_sources_table = Table.read(filename, hdu="ExtendedSources")
Code Example #5
File: search.py  Project: vilhelmp/splatsearch
def _create_astrotable(rows, column_names, column_dtypes, column_units):
    results = Table(data = rows , 
                    names = column_names, 
                    dtypes = column_dtypes)
    for i in _np.arange(len(column_units)):
        results.field(i).units = column_units[i]
    return results
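The `dtypes=` keyword and the `.units` attribute belong to an older astropy release; in current astropy the constructor keyword is `dtype` and a column's unit is set through `.unit`. A sketch of the equivalent helper under that assumption, keeping the same arguments as above:

import numpy as _np
from astropy.table import Table

def _create_astrotable(rows, column_names, column_dtypes, column_units):
    # current astropy spells the keyword `dtype`, not `dtypes`
    results = Table(data=rows, names=column_names, dtype=column_dtypes)
    for i in _np.arange(len(column_units)):
        # columns expose `.unit` (singular) rather than `.units`
        results.columns[i].unit = column_units[i]
    return results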
Code Example #6
File: graph.py  Project: jmcgover/cplop
def filter_by_alpha(data, key, value):
    print("Filtering by %s == %s" % (key, str(value)))
    new_data = Table(data[0:0])
    for d in data:
        if d[key] == value:
            new_data.add_row(d)
    return new_data
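Adding rows one at a time works, but astropy Tables also accept boolean-mask indexing, so the same filter can be written without the loop. A sketch with the same assumed inputs:

def filter_by_alpha(data, key, value):
    print("Filtering by %s == %s" % (key, str(value)))
    # boolean-mask indexing returns a new Table containing only matching rows
    return data[data[key] == value]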
Code Example #7
File: dayarc.py  Project: dlakaplan/geminiutil
def get_arcs(image, skypol=2, clip=3, fferr=0.015):
    """Fit arc image and create table with one-dimensional arc spectra.

    Parameters
    ----------
    image: ~fits.HDUList
        prepared image set with one extension for each chip
    skypol: ~int, optional
        degree with which image is fit along spatial direction (default: 2)
    clip: ~float, optional
        maximum sigma with which points are allowed to deviate from fit
        (default: 3)
    fferr: ~float, optional
        Flat field uncertainty, used for estimating uncertainties
        (default: 0.015)

    Returns
    -------
    Table with columns 'x' (positions on exposed part of CCD) and
    extracted fluxes 'f' (with dimension equal to number of extensions)
    """
    data = multiext_data(image)
    ron = multiext_header_value(image, 'RDNOISE')
    arc, chi2, _, ntbadl, ntbadh = extract.fitsky(data, ron=ron, skypol=skypol,
                                                  clip=clip, fferr=fferr,
                                                  ibadlimit=7)
    x = multiext_x_coords(image, 'CCDSEC').squeeze()
    arctab = Table([x.T, arc.T, chi2.T], names=('x', 'f', 'chi2'))
    arctab.meta['grwlen'] = image[0].header['GRWLEN']
    return arctab
Code Example #8
File: decam.py  Project: minzastro/lookup
 def load_data(self, catalog, ra, dec, radius):
     param = self.CATALOGS[catalog]
     coord = self._coord_search(ra, dec,
                                param['bricks'], param['radius'],
                                best=True)
     print('-'*40, coord)
     if coord is None:
         return '0%s' % catalog
     filename = param['brick_file'].format(*coord)
     base = self._build_basic_answer(catalog)
     print(filename, path.exists('cache/%s' % filename))
     if path.exists('cache/%s' % filename):
         result = self._coord_search(ra, dec, 'cache/%s' % filename,
                                     radius / 3600.)
         if result is None:
             # No data
             return '1%s' % catalog
         df = Table(result)
         table = html.fromstring(' '.join(df.pformat(html=True,
                                                      max_width=-1)[:-1]))
         table.attrib['border'] = '1'
         table.attrib['cellspacing'] = '0'
         base.append(table)
     else:
         with open('decam_downloads.links', 'a') as links:
             links.write('%s/%s\n' % (param['url'], filename))
         div = html.Element('div')
         div.text = 'Brick added to download list. Come back later'
         base.append(div)
     return tostring(base, method='html')
Code Example #9
File: test_hdf5.py  Project: MaxNoe/astropy
def test_write_nopath(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    with pytest.raises(ValueError) as exc:
        t1.write(test_file)
    assert exc.value.args[0] == "table path should be set via the path= argument"
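The test checks the error raised when no path= is given. For reference, a minimal round trip that does supply one (file and group names are illustrative, and h5py must be installed):

from astropy.table import Table, Column

t = Table()
t.add_column(Column(name='a', data=[1, 2, 3]))
t.write('example.hdf5', path='data/table')          # path names the dataset inside the file
t2 = Table.read('example.hdf5', path='data/table')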
Code Example #10
File: test_hdf5.py  Project: MaxNoe/astropy
def test_write_invalid_path(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    with pytest.raises(ValueError) as exc:
        t1.write(test_file, path='test/')
    assert exc.value.args[0] == "table path should end with table name, not /"
Code Example #11
File: test_diff.py  Project: Cadair/astropy
def test_tablediff():
    """
    Test diff-ing two simple Table objects.
    """
    a = Table.read("""name    obs_date    mag_b  mag_v
M31     2012-01-02  17.0   16.0
M82     2012-10-29  16.2   15.2
M101    2012-10-31  15.1   15.5""", format='ascii')
    b = Table.read("""name    obs_date    mag_b  mag_v
M31     2012-01-02  17.0   16.5
M82     2012-10-29  16.2   15.2
M101    2012-10-30  15.1   15.5
NEW     2018-05-08   nan    9.0""", format='ascii')
    f = io.StringIO()
    identical = report_diff_values(a, b, fileobj=f)
    assert not identical
    out = f.getvalue()
    assert out == ('     name  obs_date  mag_b mag_v\n'
                   '     ---- ---------- ----- -----\n'
                   '  a>  M31 2012-01-02  17.0  16.0\n'
                   '   ?                           ^\n'
                   '  b>  M31 2012-01-02  17.0  16.5\n'
                   '   ?                           ^\n'
                   '      M82 2012-10-29  16.2  15.2\n'
                   '  a> M101 2012-10-31  15.1  15.5\n'
                   '   ?               ^\n'
                   '  b> M101 2012-10-30  15.1  15.5\n'
                   '   ?               ^\n'
                   '  b>  NEW 2018-05-08   nan   9.0\n')

    # Identical
    assert report_diff_values(a, a, fileobj=f)
Code Example #12
File: test_hdf5.py  Project: MaxNoe/astropy
def test_read_missing_table(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    with h5py.File(test_file, 'w') as f:
        f.create_group('test').create_group('path')
    with pytest.raises(OSError) as exc:
        Table.read(test_file, path='test/path/table')
    assert exc.value.args[0] == "Path test/path/table does not exist"
Code Example #13
File: fermi.py  Project: dlennarz/gammapy
    def flux_points(self):
        """
        Differential flux points (`~gammapy.spectrum.FluxPoints`).
        """
        table = Table()
        table.meta['SED_TYPE'] = 'flux'
        e_ref = self._ebounds.log_centers
        table['e_ref'] = e_ref
        table['e_min'] = self._ebounds.lower_bounds
        table['e_max'] = self._ebounds.upper_bounds

        flux = self._get_flux_values()
        flux_err = self._get_flux_values('Unc_Flux')
        table['flux'] = flux
        table['flux_errn'] = np.abs(flux_err[:, 0])
        table['flux_errp'] = flux_err[:, 1]

        nuFnu = self._get_flux_values('nuFnu', 'erg cm-2 s-1')
        table['eflux'] = nuFnu
        table['eflux_errn'] = np.abs(nuFnu * flux_err[:, 0] / flux)
        table['eflux_errp'] = nuFnu * flux_err[:, 1] / flux

        # TODO: check if nuFnu is maybe integral flux
        table['dnde'] = (nuFnu * e_ref ** -2).to('TeV-1 cm-2 s-1')
        return FluxPoints(table)
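The last line converts an energy-flux point into a differential flux via dN/dE = (E^2 dN/dE) / E^2. A small unit check of that conversion with astropy units (the numbers are illustrative):

import astropy.units as u

e_ref = 1.0 * u.TeV
nuFnu = 1e-11 * u.Unit('erg cm-2 s-1')     # illustrative E^2 dN/dE value
dnde = (nuFnu * e_ref ** -2).to('TeV-1 cm-2 s-1')
print(dnde)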
Code Example #14
File: star_hist.py  Project: sot/mica
def get_gui_data(agasc_id):
    """
    Fetch guide/track history from Sybase for an agasc id

    :param agasc_id: AGASC id
    :returns: list of dicts of uses as guide stars
    """
    with DBI(dbi='sybase', server='sybase', user='******') as db:
        gui = db.fetchall(
            'select * from trak_stats_data where type != "FID" and id = {}'.format(
                agasc_id))
    if not len(gui):
        return []
    gui = Table(gui)
    gui.sort('kalman_datestart')
    # make list of dicts for use in light templates in kadi web app
    gui_table = []
    for s in gui:
        srec = {}
        # Use these columns as they are named from the mica acq stats table
        for col in ['type', 'obsid', 'obi', 'slot']:
            srec[col] = s[col]
        # rename these columns in the dictionary
        srec['date'] = s['kalman_datestart']
        srec['mag'] = s['mag_exp']
        srec['mag_obs'] = s['aoacmag_mean']
        srec['perc_not_track'] = s['not_tracking_samples'] * 100.0 / s['n_samples']
        gui_table.append(srec)
    return gui_table
Code Example #15
File: core.py  Project: gammapy/gammapy
    def from_hdulist(cls, hdulist, hdu1="SPECTRUM", hdu2="EBOUNDS"):
        """Create from `~astropy.io.fits.HDUList`."""
        counts_table = Table.read(hdulist[hdu1])
        ebounds = Table.read(hdulist[hdu2])
        emin = ebounds["E_MIN"].quantity
        emax = ebounds["E_MAX"].quantity

        # Check if optional columns are present in the counts table
        quality = None
        areascal = None
        backscal = None
        if "QUALITY" in counts_table.colnames:
            quality = counts_table["QUALITY"].data
        if "AREASCAL" in counts_table.colnames:
            areascal = counts_table["AREASCAL"].data
        if "BACKSCAL" in counts_table.colnames:
            backscal = counts_table["BACKSCAL"].data

        kwargs = dict(
            data=counts_table["COUNTS"],
            backscal=backscal,
            energy_lo=emin,
            energy_hi=emax,
            quality=quality,
            areascal=areascal,
            livetime=counts_table.meta["EXPOSURE"] * u.s,
            obs_id=counts_table.meta["OBS_ID"],
        )
        if hdulist[1].header["HDUCLAS2"] == "BKG":
            kwargs["is_bkg"] = True
        return cls(**kwargs)
Code Example #16
File: test_photometry.py  Project: mmorage/DRAGONS
def test_calculate_magnitudes():
    # Set up a simple REFCAT
    refcat = Table()
    refcat['NUMBER'] = np.arange(5) + 1

    for i, filt in enumerate('jhk'):
        refcat['{}mag'.format(filt)] = np.array(float(i) + refcat['NUMBER'], dtype='f4')
        refcat['{}mag_err'.format(filt)] = np.array([0.1] * len(refcat), dtype='f4')

    # Simple
    prims._calculate_magnitudes(refcat, ['h'])

    assert all(refcat['filtermag'].data == refcat['hmag'].data)
    assert all(refcat['filtermag_err'].data == refcat['hmag_err'].data)

    refcat.remove_columns(['filtermag', 'filtermag_err'])

    # Constant offset
    prims._calculate_magnitudes(refcat, ['h', (0.1,0.1)])

    assert all(refcat['filtermag'].data == refcat['hmag'].data+0.1)
    assert all(abs(refcat['filtermag_err'].data - np.sqrt(0.01+refcat['hmag_err'].data**2)) < 0.001)

    refcat.remove_columns(['filtermag', 'filtermag_err'])

    # Colour term
    prims._calculate_magnitudes(refcat, ['h', (1.0,0.1,'j-h')])

    assert all(refcat['filtermag'].data == refcat['jmag'].data)

    # This holds because J-H=-1 for all rows
    assert all(abs(refcat['filtermag_err'].data - 0.2) < 0.001)
Code Example #17
File: NeStarPar.py  Project: pacargile/NeStarPar
    def __init__(self, stripeindex=None):
        if stripeindex == None:
            BCfile = MISTFILE_default
        else:
            BCfile = '/n/regal/conroy_lab/pac/MISTFILES/MIST_full_{0}.h5'.format(stripeindex)

        # read in MIST hdf5 table
        MISTh5 = h5py.File(BCfile, 'r')
        # determine the BC datasets
        BCTableList = [x for x in MISTh5.keys() if x[:3] == 'BC_']
        # read in each BC dataset and pull the photometric information
        for BCT in BCTableList:
            BCTABLE = Table(np.array(MISTh5[BCT]))
            if BCT == BCTableList[0]:
                BC = BCTABLE.copy()
            else:
                BCTABLE.remove_columns(['Teff', 'logg', '[Fe/H]', 'Av', 'Rv'])
                BC = hstack([BC, BCTABLE])

        BC_AV0 = BC[BC['Av'] == 0.0]

        self.bands = BC.keys()
        [self.bands.remove(x) for x in ['Teff', 'logg', '[Fe/H]', 'Av', 'Rv']]

        self.redintr = LinearNDInterpolator(
            (BC['Teff'], BC['logg'], BC['[Fe/H]'], BC['Av']),
            np.stack([BC[bb] for bb in self.bands], axis=1),
            rescale=True
            )
        self.redintr_0 = LinearNDInterpolator(
            (BC_AV0['Teff'], BC_AV0['logg'], BC_AV0['[Fe/H]']),
            np.stack([BC_AV0[bb] for bb in self.bands], axis=1),
            rescale=True
            )
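The two interpolators above are scipy LinearNDInterpolator objects built on the bolometric-correction grid. A tiny standalone sketch of the same call on made-up points:

import numpy as np
from scipy.interpolate import LinearNDInterpolator

# Made-up (x, y) sample points and two "band" values per point
points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
values = np.array([[0.0, 10.0], [1.0, 11.0], [2.0, 12.0], [3.0, 13.0]])

interp = LinearNDInterpolator(points, values, rescale=True)
print(interp(0.5, 0.5))    # linearly interpolates both columns at (0.5, 0.5)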
Code Example #18
def filter_catalog(singlecat,matchedcat,fitsimage,outname,auxcatname,options=None):
    if options is None:
        options = o

    if options['restart'] and os.path.isfile(outname):
        warn('File ' + outname +' already exists, skipping source filtering step')
    else:

        matchedcat = Table.read(matchedcat)
        singlecat = Table.read(singlecat)

        fitsimage = fits.open(fitsimage)

        fieldra = fitsimage[0].header['CRVAL1']
        fielddec = fitsimage[0].header['CRVAL2']
        fitsimage.close()

        print('Originally', len(matchedcat), 'sources')
        matchedcat=filter_catalogue(matchedcat,fieldra,fielddec,3.0)

        print('%i sources after filtering for 3.0 deg from centre' % len(matchedcat))

        matchedcat=matchedcat[matchedcat['DC_Maj']<10.0] # ERROR!

        print('%i sources after filtering for sources over 10arcsec in LOFAR' % len(matchedcat))

        # not implemented yet!
        #tooextendedsources_aux = np.array(np.where(matchedcat[1].data[options['%s_match_majkey2'%auxcatname]] > options['%s_filtersize'%auxcatname])).flatten()
        #print '%s out of %s sources filtered out as over %sarcsec in %s'%(np.size(tooextendedsources_aux),len(allsources),options['%s_filtersize'%auxcatname],auxcatname)

        matchedcat=select_isolated_sources(matchedcat,30.0)
        print('%i sources after filtering for isolated sources in LOFAR' % len(matchedcat))

        matchedcat.write(outname)
Code Example #19
File: catalog.py  Project: cpadavis/destest
  def download_cat_desdm(query,name='gold',table='NSEVILLA.Y1A1_GOLD_1_0_1',dir='/share/des/sv/ngmix/v010/',order=True,num=1000000,start=0):

    from astropy.table import Table
    from desdb import Connection

    conn = Connection()

    if order:
      sorder='order by coadd_objects_id'
    else:
      sorder=''

    for tile in range(140):
      if tile<start:
        continue
      print(tile)
      if num==0:
        q = 'select '+query+' from '+table
      else:
        q = 'select '+query+' from ( select /*+ FIRST_ROWS(n) */ A.*, ROWNUM rnum from ( select * from '+table+' '+sorder+') A where ROWNUM < '+str((tile+1.)*num)+' ) where rnum  >= '+str(tile*num)
      print(q)
      data=conn.quick(q, array=False)
      params = data[0].keys()
      tables = {}
      for p in params:
        arr = [(d[p] if (d[p] is not None) else np.nan) for d in data ]
        arr = np.array(arr)
        tables[p] = arr
      t = Table(tables)
      t.write(dir+name+'_'+str(tile)+'.fits.gz')
      if num==0:
        break

    return
Code Example #20
File: line_utils.py  Project: banados/linetools
def transtable_from_speclines(speclines, add_keys=None):
    """Generate a Table summarizing the transitions from a list of SpectralLines
    Parameters
    ----------
    speclines : list of SpectralLine objects
    add_keys : list, optional
      Additional keys to include in Table

    Returns
    -------
    tbl : Table

    """
    keys = ['wrest','name','Z', 'ion', 'Ej', 'z', 'EW', 'sig_EW']
    if isinstance(speclines[0], AbsLine):
        keys += ['flag_N', 'logN', 'sig_logN']
    if add_keys is not None:
        keys += add_keys

    # Get started
    tbl = Table()

    # Loop to my loop
    for key in keys:
        tbl[key] = parse_speclines(speclines, key, mk_array=True)

    # Sort
    tbl.sort(['Z','ion','Ej','wrest'])
    # Return
    return tbl
Code Example #21
    def read(cls, filename):
        """
        Read convolved flux from a FITS file

        Parameters
        ----------
        filename : str
            The name of the FITS file to read the convolved fluxes from
        """

        from astropy.io import fits
        from astropy.table import Table
        from astropy import units as u

        conv = cls()

        # Open the convolved flux FITS file
        convolved = fits.open(filename)

        keywords = convolved[0].header

        # Try and read in the wavelength of the filter
        if 'FILTWAV' in keywords:
            conv.central_wavelength = keywords['FILTWAV'] * u.micron
        else:
            conv.central_wavelength = None

        # Read in apertures, if present
        try:
            ta = Table.read(convolved['APERTURES'])
            if ta['APERTURE'].unit is None:
                ta['APERTURE'].unit = u.au
            conv.apertures = ta['APERTURE'].data * ta['APERTURE'].unit
        except KeyError:
            pass

        # Create shortcuts to table
        tc = Table.read(convolved['CONVOLVED FLUXES'])

        # Read in model names
        conv.model_names = tc['MODEL_NAME']

        # Read in flux and flux errors

        if tc['TOTAL_FLUX'].unit is None:
            tc['TOTAL_FLUX'].unit = u.mJy

        if tc['TOTAL_FLUX_ERR'].unit is None:
            tc['TOTAL_FLUX_ERR'].unit = u.mJy

        if tc['TOTAL_FLUX'].ndim == 1 and conv.n_ap == 1:
            conv.flux = tc['TOTAL_FLUX'].data.reshape(tc['TOTAL_FLUX'].shape[0], 1) * tc['TOTAL_FLUX'].unit
        else:
            conv.flux = tc['TOTAL_FLUX'].data * tc['TOTAL_FLUX'].unit

        if tc['TOTAL_FLUX_ERR'].ndim == 1 and conv.n_ap == 1:
            conv.error = tc['TOTAL_FLUX_ERR'].data.reshape(tc['TOTAL_FLUX_ERR'].shape[0], 1) * tc['TOTAL_FLUX_ERR'].unit
        else:
            conv.error = tc['TOTAL_FLUX_ERR'].data * tc['TOTAL_FLUX_ERR'].unit

        return conv
Code Example #22
File: specfit.py  Project: lewyh/specfit
def bestfits(ID):
	means = []
	medians = []
	q_84s = []
	q_16s = []
	filename=('tables/ndim_' + str(ndim) + '_walkers_' + str(nwalkers) + '_nruns_'+ str(nruns) + '.fits')
	data = Table.read(filename)
	mean = [np.mean(data[key]) for key in param_keys]
	means.append(mean)
	median = [np.median(data[key]) for key in param_keys]
	medians.append(median)
	q_84 = [ np.percentile(data[key],84) for key in param_keys ]
	q_84s.append(q_84)
	q_16 = [ np.percentile(data[key],16) for key in param_keys ]
	q_16s.append(q_16)

	q_84s = np.array(q_84s)
	q_16s = np.array(q_16s)
	means = np.array(means)
	medians = np.array(medians)

	outdata = [ means[:,0], means[:,1], means[:,2], means[:,3], means[:,4], medians[:,0], medians[:,1], medians[:,2], medians[:,3], medians[:,4], q_16s[:,0], q_16s[:,1], q_16s[:,2], q_16s[:,3], q_16s[:,4], q_84s[:,0], q_84s[:,1],q_84s[:,2], q_84s[:,3], q_84s[:,4] ]
	colnames = ['Teff_mean','logg_mean','Vrot_mean','[M/H]_mean','Sky_mean','Teff_median','logg_median','Vrot_median','[M/H]_median','Sky_median','Teff_q_16','logg_q_16','Vrot_q_16','[M/H]_q_16','Sky_q_16','Teff_q_84','logg_q_84','Vrot_q_84','[M/H]_q_84','Sky_q_84']
	table = Table(outdata, names=colnames)
	filename = 'tables/averages.fits'
	Table.write(table, filename, overwrite=True)
Code Example #23
    def test_add_ptclcat_to_cache4(self):
        """ Enforce string representation of positional arguments
        """
        ptclcat = UserSuppliedPtclCatalog(Lbox=200,
            particle_mass=100, redshift=self.redshift,
            **self.good_ptclcat_args)

        basename = 'abc.hdf5'
        fname = os.path.join(self.dummy_cache_baseloc, basename)
        _t = Table({'x': [0]})
        _t.write(fname, format='ascii')
        assert os.path.isfile(fname)

        dummy_string = '  '

        class Dummy(object):
            pass

            def __str__(self):
                raise TypeError
        not_representable_as_string = Dummy()

        with pytest.raises(HalotoolsError) as err:
            ptclcat.add_ptclcat_to_cache(
                fname, not_representable_as_string, dummy_string, dummy_string,
                overwrite=True)
        substr = "must all be strings."
        assert substr in err.value.args[0]
Code Example #24
File: core.py  Project: fjones36/astroquery
def _create_bibcode_table(data, splitter):
    ref_list = [splitter + ref for ref in data.split(splitter)][2:]
    max_len = max([len(r) for r in ref_list])
    table = Table(names=['References'], dtypes=['S%i' % max_len])
    for ref in ref_list:
        table.add_row([ref.decode('utf-8')])
    return table
Code Example #25
File: ebossfit.py  Project: imcgreer/simqso
def make_coreqso_table(dr14qso,ebosstarg):
    if isinstance(dr14qso,str):
        dr14qso = Table.read(dr14qso)
    if isinstance(ebosstarg,str):
        ebosstarg = Table.read(ebosstarg)
    #
    dr14coo = SkyCoord(dr14qso['RA'],dr14qso['DEC'],unit=u.deg)
    # restrict to CORE quasar targets
    ii = np.where(ebosstarg['EBOSS_TARGET1'] & (1<<10) > 0)[0]
    ebosstarg = ebosstarg[ii]
    ebosstargcoo = SkyCoord(ebosstarg['RA'],ebosstarg['DEC'],unit=u.deg)
    # now identify confirmed quasars from DR14 in the target list
    m1,m2,sep,_ = dr14coo.search_around_sky(ebosstargcoo,2*u.arcsec)
    # for some reason there is a repeated entry...
    _,ii = np.unique(m1,return_index=True)
    dr14qso = dr14qso[m2[ii]]
    # just a sanity check
    jj = np.where(dr14qso['EXTINCTION']>0)[0]
    assert np.allclose(dr14qso['EXTINCTION'][jj],
                       ebosstarg['EXTINCTION'][m1[ii[jj]]],atol=1e-3)
    # extract all the WISE columns from targeting
    wisecols = ['W1_MAG','W1_MAG_ERR',
                'W1_NANOMAGGIES','W1_NANOMAGGIES_IVAR',
                'W2_NANOMAGGIES','W2_NANOMAGGIES_IVAR',
                'HAS_WISE_PHOT']
    # overwriting the DR14Q flux fields because they have invalid entries
    for k in wisecols + ['EXTINCTION','PSFFLUX','PSFFLUX_IVAR']:
        dr14qso[k] = ebosstarg[k][m1[ii]]
    dr14qso.write('ebosscore_dr14q.fits',overwrite=True)
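search_around_sky returns all pairs within the separation limit; as used above, the first index array (m1) points into the catalogue passed as the argument (ebosstarg) and the second (m2) into the coordinates the method is called on (dr14qso). A small standalone sketch with made-up coordinates:

import astropy.units as u
from astropy.coordinates import SkyCoord

cat1 = SkyCoord(ra=[10.0, 20.0] * u.deg, dec=[-5.0, -5.0] * u.deg)
cat2 = SkyCoord(ra=[10.0001, 30.0] * u.deg, dec=[-5.0, 0.0] * u.deg)

m1, m2, sep2d, _ = cat1.search_around_sky(cat2, 2 * u.arcsec)
print(m1, m2, sep2d.to(u.arcsec))    # only the first source in each list matches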
Code Example #26
    def test_add_ptclcat_to_cache1(self):
        """ Verify the overwrite requirement is enforced
        """
        ptclcat = UserSuppliedPtclCatalog(Lbox=200,
            particle_mass=100, redshift=self.redshift,
            **self.good_ptclcat_args)

        basename = 'abc'
        fname = os.path.join(self.dummy_cache_baseloc, basename)
        _t = Table({'x': [0]})
        _t.write(fname, format='ascii')
        assert os.path.isfile(fname)

        dummy_string = '  '
        with pytest.raises(HalotoolsError) as err:
            ptclcat.add_ptclcat_to_cache(
                fname, dummy_string, dummy_string, dummy_string)
        substr = "Either choose a different fname or set ``overwrite`` to True"
        assert substr in err.value.args[0]

        with pytest.raises(HalotoolsError) as err:
            ptclcat.add_ptclcat_to_cache(
                fname, dummy_string, dummy_string, dummy_string,
                overwrite=True)
        assert substr not in err.value.args[0]
Code Example #27
File: overlap.py  Project: mwcraig/gammapy
def read_model_components(cfg_file):
    """Read model components from ``model_components/*.fits`` and return
    a list of 2D component images with containment masks.
    """
    cfg = configobj.ConfigObj(cfg_file)
    column_names = ('Name', 'Type', 'GLON', 'GLAT', 'Sigma', 'Norm')
    column_types = ('S25', 'S25', np.float32, np.float32, np.float32, np.float32)
    component_table = Table(names=column_names, dtype=column_types)
    
    # Build data table
    for component in cfg.keys():
        type_ = cfg[component]['Type']
        glon = cfg[component]['GLON']
        glat = cfg[component]['GLAT']
        sigma = cfg[component]['Sigma']
        norm = cfg[component]['Norm']
        component_table.add_row([component, type_, glon, glat, sigma, norm])
    if os.path.exists('model_components/'):
        read_fits_files(component_table)
    else:
        logging.error('No model components found. Please reuse morph_fit.')
    if os.path.exists('fit.reg'):
        read_region_file(component_table)
    else:
        compute_containment_radius(component_table)
        logging.info('Computing containment radii')
    return component_table
Code Example #28
File: linelist.py  Project: astrofrog/specviz
    def extract_range(self, wmin, wmax):
        ''' Builds a LineList instance out of self, with
            the subset of lines that fall within the
            wavelength range defined by 'wmin' and 'wmax'

        :param wmin: float
            minimum wavelength of the wavelength range
        :param wmax: float
            maximum wavelength of the wavelength range
        :return: LineList
            line list with subset of lines
        '''
        wavelengths = self[WAVELENGTH_COLUMN].quantity

        # convert wavelengths in line list to whatever
        # units the wavelength range is expressed in.
        new_wavelengths = wavelengths.to(wmin.unit)

        # 'indices' points to rows with wavelength values
        # that lie outside the wavelength range.
        indices = np.where((new_wavelengths.value < wmin.value) |
                           (new_wavelengths.value > wmax.value))

        # make copy of self and remove unwanted lines from the copy.
        result = Table(self)
        result.remove_rows(indices)

        return result
Code Example #29
File: cube.py  Project: OlgaVorokh/gammapy
    def to_table(self):
        """Convert cube to astropy table format.

        The name of the table is stored in the table meta information
        under the keyword 'name'.

        Returns
        -------
        table : `~astropy.table.Table`
            Table containing the cube.
        """
        # data arrays
        a_coordx_lo = Quantity([self.coordx_edges[:-1]])
        a_coordx_hi = Quantity([self.coordx_edges[1:]])
        a_coordy_lo = Quantity([self.coordy_edges[:-1]])
        a_coordy_hi = Quantity([self.coordy_edges[1:]])
        a_energy_lo = Quantity([self.energy_edges[:-1]])
        a_energy_hi = Quantity([self.energy_edges[1:]])
        a_data = Quantity([self.data])

        # table
        table = Table()
        table[self.scheme_dict['coordx_fits_name'] + '_LO'] = a_coordx_lo
        table[self.scheme_dict['coordx_fits_name'] + '_HI'] = a_coordx_hi
        table[self.scheme_dict['coordy_fits_name'] + '_LO'] = a_coordy_lo
        table[self.scheme_dict['coordy_fits_name'] + '_HI'] = a_coordy_hi
        table[self.scheme_dict['energy_fits_name'] + '_LO'] = a_energy_lo
        table[self.scheme_dict['energy_fits_name'] + '_HI'] = a_energy_hi
        table[self.scheme_dict['data_fits_name']] = a_data

        table.meta['name'] = self.scheme_dict['hdu_fits_name']

        return table
Code Example #30
def richness(group_id): 
    gals = Table() 
    gals['groupid'] = group_id
    gals['dummy'] = 1
    grouped_table = gals.group_by('groupid')
    grp_richness = grouped_table['dummy'].groups.aggregate(np.sum)
    return grp_richness
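A concrete illustration of the group_by / aggregate pattern used in richness (the group ids are made up):

import numpy as np
from astropy.table import Table

gals = Table()
gals['groupid'] = [1, 1, 2, 3, 3, 3]    # illustrative group membership
gals['dummy'] = 1                       # one count per galaxy

grouped = gals.group_by('groupid')
richness = grouped['dummy'].groups.aggregate(np.sum)
print(richness)                          # per-group counts: 2, 1, 3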
Code Example #31
File: build_hosts.py  Project: lao19881213/FRB
def build_host_190608(run_ppxf=False, build_photom=False):
    """ Build the host galaxy data for FRB 190608

    All of the data comes from Bhandari+2020, ApJL, in press

    Args:
        build_photom (bool, optional):
    """
    frbname = '190608'
    gal_coord = SkyCoord('J221604.90-075356.0',
                         unit=(units.hourangle,
                               units.deg))  # Cherie;  07-Mar-2019

    # Instantiate
    host190608 = frbgalaxy.FRBHost(gal_coord.ra.value, gal_coord.dec.value,
                                   frbname)

    # Load redshift table
    ztbl = Table.read(os.path.join(db_path, 'CRAFT', 'Bhandari2019',
                                   'z_SDSS.ascii'),
                      format='ascii.fixed_width')
    z_coord = SkyCoord(ra=ztbl['RA'], dec=ztbl['DEC'], unit='deg')
    idx, d2d, _ = match_coordinates_sky(gal_coord, z_coord, nthneighbor=1)
    if np.min(d2d) > 0.5 * units.arcsec:
        embed(header='190608')
    # Redshift -- SDSS
    host190608.set_z(ztbl[idx]['ZEM'], 'spec')

    # Morphology
    #host190608.parse_galfit(os.path.join(photom_path, 'CRAFT', 'Bannister2019',
    #                               'HG180924_galfit_DES.log'), 0.263)

    # Photometry

    # SDSS
    # Grab the table (requires internet)
    photom_file = os.path.join(db_path, 'CRAFT', 'Bhandari2019',
                               'bhandari2019_photom.ascii')
    if build_photom:
        # VLT
        search_r = 1 * units.arcsec
        # SDSS
        sdss_srvy = sdss.SDSS_Survey(gal_coord, search_r)
        sdss_tbl = sdss_srvy.get_catalog(print_query=True)
        sdss_tbl['Name'] = 'HG190608'
        photom = frbphotom.merge_photom_tables(sdss_tbl, photom_file)
        # WISE
        wise_srvy = wise.WISE_Survey(gal_coord, search_r)
        wise_tbl = wise_srvy.get_catalog(print_query=True)
        wise_tbl['Name'] = 'HG190608'
        # Write
        photom = frbphotom.merge_photom_tables(wise_tbl, photom, debug=True)
        photom.write(photom_file,
                     format=frbphotom.table_format,
                     overwrite=True)
    # Parse
    host190608.parse_photom(
        Table.read(photom_file, format=frbphotom.table_format))

    # PPXF
    results_file = os.path.join(db_path, 'CRAFT', 'Bhandari2019',
                                'HG190608_SDSS_ppxf.ecsv')
    if run_ppxf:
        meta, spectrum = host190608.get_metaspec(instr='SDSS')
        spec_fit = None
        ppxf.run(spectrum,
                 2000.,
                 host190608.z,
                 results_file=results_file,
                 spec_fit=spec_fit,
                 chk=True)
    host190608.parse_ppxf(results_file)

    # Derived quantities

    # AV
    host190608.calc_nebular_AV('Ha/Hb')

    # SFR
    host190608.calc_nebular_SFR('Ha')
    #host.derived['SFR_nebular_err'] = -999.

    # CIGALE
    host190608.parse_cigale(
        os.path.join(db_path, 'CRAFT', 'Bhandari2019', 'HG190608_CIGALE.fits'))
    # Vet all
    host190608.vet_all()

    # Write -- BUT DO NOT ADD TO REPO (YET)
    path = resource_filename('frb', 'data/Galaxies/{}'.format(frbname))
    host190608.write_to_json(path=path)
Code Example #32
def setup_batch_beast_fit(
    num_percore=5,
    nice=None,
    overwrite_logfile=True,
    prefix=None,
    use_sd=True,
    nsubs=1,
    nprocs=1,
):
    """
    Sets up batch files for submission to the 'at' queue on
    linux (or similar) systems

    Parameters
    ----------
    num_percore : int (default = 5)
        number of fitting runs per core

    nice : int (default = None)
        set this to an integer (-20 to 20) to prepend a "nice" level
        to the fitting command

    overwrite_logfile : boolean (default = True)
        if True, will overwrite the log file; if False, will append to
        existing log file

    prefix : string (default=None)
        Set this to a string (such as 'source activate astroconda') to prepend
        to each batch file (use '\n's to make multiple lines)

    use_sd : boolean (default=True)
        If True, split runs based on source density (determined by finding
        matches to datamodel.astfile with SD info)

    nsubs : int (default=1)
        number of subgrids used for the physics model

    nprocs : int (default=1)
        Number of parallel processes to use when doing the fitting
        (currently only implemented for subgrids)


    Returns
    -------
    run_info_dict : dict
        Dictionary indicating which catalog files have complete modeling, and
        which job files need to be run

    """

    # before doing ANYTHING, force datamodel to re-import (otherwise, any
    # changes within this python session will not be loaded!)
    importlib.reload(datamodel)
    # check input parameters
    verify_params.verify_input_format(datamodel)

    # setup the subdirectory for the batch and log files
    job_path = datamodel.project + "/fit_batch_jobs/"
    if not os.path.isdir(job_path):
        os.mkdir(job_path)

    log_path = job_path + "logs/"
    if not os.path.isdir(log_path):
        os.mkdir(log_path)

    # get file name lists (to check if they exist and/or need to be resumed)
    file_dict = create_filenames.create_filenames(use_sd=use_sd, nsubs=nsubs)

    # - input files
    photometry_files = file_dict["photometry_files"]
    # modelsedgrid_files = file_dict['modelsedgrid_files']
    # noise_files = file_dict['noise_files']

    # - output files
    stats_files = file_dict["stats_files"]
    pdf_files = file_dict["pdf_files"]
    lnp_files = file_dict["lnp_files"]

    # - total number of files
    n_files = len(photometry_files)

    # - other useful info
    sd_sub_info = file_dict["sd_sub_info"]
    gridsub_info = file_dict["gridsub_info"]

    # names of output log files
    log_files = []

    # initialize a variable name (otherwise it got auto-added in the wrong
    # place and broke the code)
    pf = None

    for i in range(n_files):

        sd_piece = ""
        if use_sd is True:
            sd_piece = "_bin" + sd_sub_info[i][0] + "_sub" + sd_sub_info[i][1]

        gridsub_piece = ""
        if nsubs > 1:
            gridsub_piece = "_gridsub" + str(gridsub_info[i])

        log_files.append("beast_fit" + sd_piece + gridsub_piece + ".log")

    # start making the job files!

    pf_open = False
    cur_f = 0
    cur_total_size = 0.0
    j = -1

    # keep track of which files are done running
    run_info_dict = {
        "phot_file": photometry_files,
        "done": np.full(n_files, False),
        "files_to_run": [],
    }

    for i, phot_file in enumerate(photometry_files):

        print("")

        # check if this is a full run
        reg_run = False
        run_done = False
        if not os.path.isfile(stats_files[i]):
            reg_run = True
            print("no stats file")
        if not os.path.isfile(pdf_files[i]):
            reg_run = True
            print("no pdf1d file")
        if not os.path.isfile(lnp_files[i]):
            reg_run = True
            print("no lnp file")

        # first check if the pdf1d mass spacing is correct
        if not reg_run:
            hdulist = fits.open(pdf_files[i])
            delta1 = hdulist["M_ini"].data[-1, 1] - hdulist["M_ini"].data[-1, 0]
            if delta1 > 1.0:  # old linear spacing
                print("pdf1d lin mass spacing - full refitting needed")
                old_mass_spacing = True
            else:
                old_mass_spacing = False
                print("pdf1d log mass spacing - ok")

            if old_mass_spacing:
                run_done = False
                reg_run = True

        # now check if the number of results is the same as
        #    the number of observations
        if not reg_run:
            # get the observed catalog
            obs = Table.read(phot_file)

            # get the fit results catalog
            t = Table.read(stats_files[i])
            # get the number of stars that have been fit
            (indxs, ) = np.where(t["Pmax"] != 0.0)

            # get the number of entries in the lnp file
            f = tables.open_file(lnp_files[i], "r")
            nlnp = f.root._v_nchildren - 2
            f.close()

            print("# obs, stats, lnp = ", len(obs), len(indxs), nlnp)
            if (len(indxs) == len(obs)) & (nlnp == len(obs)):

                # final check, is the pdf1d file correctly populated
                tot_prob = np.sum(hdulist["M_ini"].data, axis=1)
                (tindxs, ) = np.where(tot_prob > 0.0)
                print("# good pdf1d = ", len(tindxs) - 1)
                if len(tindxs) == (len(obs) + 1):
                    run_done = True

        if run_done:
            print(stats_files[i] + " done")
            run_info_dict["done"][i] = True
        else:
            j += 1
            if j % num_percore == 0:
                cur_f += 1

                # close previous files
                if j != 0:
                    pf.close()
                    # slurm needs the job file to be executable
                    os.chmod(joblist_file,
                             stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)

                    print(
                        "total sed_trim size [Gb] = ",
                        cur_total_size / (1024.0 * 1024.0 * 1024.0),
                    )
                    cur_total_size = 0.0

                # open the slurm and param files
                pf_open = True
                joblist_file = job_path + "beast_batch_fit_" + str(cur_f) + ".joblist"
                pf = open(joblist_file, "w")
                run_info_dict["files_to_run"].append(joblist_file)

                # write out anything at the beginning of the file
                if prefix is not None:
                    pf.write(prefix + "\n")

            # flag for resuming
            resume_str = ""
            if reg_run:
                print(stats_files[i] + " does not exist " +
                      "- adding job as a regular fit job (not resume job)")
            else:
                print(stats_files[i] +
                      " not done - adding to continue fitting list (" +
                      str(len(indxs)) + "/" + str(len(t["Pmax"])) + ")")
                resume_str = "-r"

            # prepend a `nice` value
            nice_str = ""
            if nice is not None:
                nice_str = "nice -n" + str(int(nice)) + " "

            # choose whether to append or overwrite log file
            pipe_str = " > "
            if not overwrite_logfile:
                pipe_str = " >> "

            # set SD+sub option
            sd_str = ""
            if use_sd is True:
                sd_str = ' --choose_sd_sub "{0}" "{1}" '.format(
                    sd_sub_info[i][0], sd_sub_info[i][1])

            # set gridsub option
            gs_str = ""
            if nsubs > 1:
                gs_str = " --choose_subgrid {0} ".format(gridsub_info[i])

            job_command = (nice_str +
                           "python -m beast.tools.run.run_fitting " +
                           resume_str + sd_str + gs_str + " --nsubs " +
                           str(nsubs) + " --nprocs " + str(nprocs) + pipe_str +
                           log_path + log_files[i])

            pf.write(job_command + "\n")

    if pf_open:
        pf.close()

        # slurm needs the job file to be executable
        os.chmod(joblist_file, stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)

    # return the info about completed modeling
    return run_info_dict
Code Example #33
File: build_hosts.py  Project: lao19881213/FRB
def build_host_190102(run_ppxf=False, build_photom=False):
    """ Build the host galaxy data for FRB 190102

    All of the data comes from Bhandari+2020, ApJL, in press

    Args:
        build_photom (bool, optional):
    """
    frbname = '190102'
    # Stuart on June 17, 2019
    #  -- Astrometry.net !
    gal_coord = SkyCoord('J212939.60-792832.4',
                         unit=(units.hourangle,
                               units.deg))  # Cherie;  07-Mar-2019
    # Instantiate
    host190102 = frbgalaxy.FRBHost(gal_coord.ra.value, gal_coord.dec.value,
                                   frbname)

    # Redshift -- Gaussian fit to [OIII 5007] in MagE
    #  Looks great on the other lines
    #  Ok on FORS2 Halpha
    wv_oiii = 6466.48
    z_OIII = wv_oiii / 5008.239 - 1
    host190102.set_z(z_OIII, 'spec')

    photom_file = os.path.join(db_path, 'CRAFT', 'Bhandari2019',
                               'bhandari2019_photom.ascii')
    # VLT/FORS2 -- Pulled from draft on 2019-06-23
    # VLT/FORS2 -- Pulled from spreadsheet 2019-06-23
    if build_photom:
        photom = Table()
        photom['ra'] = [host190102.coord.ra.value]
        photom['dec'] = host190102.coord.dec.value
        photom['Name'] = host190102.name
        photom['VLT_u'] = 23.  # Dust corrected
        photom['VLT_u_err'] = -999.
        photom['VLT_g'] = 21.8  # Dust corrected
        photom['VLT_g_err'] = 0.1
        photom['VLT_I'] = 20.71  # Dust corrected
        photom['VLT_I_err'] = 0.05
        photom['VLT_z'] = 20.5  # Dust corrected
        photom['VLT_z_err'] = 0.2
        # Write
        photom = frbphotom.merge_photom_tables(photom, photom_file)
        photom.write(photom_file,
                     format=frbphotom.table_format,
                     overwrite=True)
    host190102.parse_photom(
        Table.read(photom_file, format=frbphotom.table_format))

    # PPXF
    if run_ppxf:
        # MagE
        results_file = os.path.join(db_path, 'CRAFT', 'Bhandari2019',
                                    'HG190102_MagE_ppxf.ecsv')
        spec_fit = os.path.join(db_path, 'CRAFT', 'Bhandari2019',
                                'HG190102_MagE_ppxf.fits')
        meta, spectrum = host190102.get_metaspec(instr='MagE')
        R = meta['R']
        # Correct for Galactic extinction
        ebv = float(nebular.get_ebv(host190102.coord)['meanValue'])
        alAV = nebular.load_extinction('MW')
        AV = ebv * 3.1  # RV
        Al = alAV(spectrum.wavelength.value) * AV
        # New spec
        new_flux = spectrum.flux * 10**(Al / 2.5)
        new_sig = spectrum.sig * 10**(Al / 2.5)
        new_spec = XSpectrum1D.from_tuple(
            (spectrum.wavelength, new_flux, new_sig))
        # Mask
        atmos = [(7550, 7750)]
        ppxf.run(new_spec,
                 R,
                 host190102.z,
                 results_file=results_file,
                 spec_fit=spec_fit,
                 chk=True,
                 atmos=atmos)
    host190102.parse_ppxf(
        os.path.join(db_path, 'CRAFT', 'Bhandari2019',
                     'HG190102_MagE_ppxf.ecsv'))

    # Derived quantities

    # AV
    host190102.calc_nebular_AV('Ha/Hb')

    # SFR
    host190102.calc_nebular_SFR('Ha')
    #host.derived['SFR_nebular_err'] = -999.

    # CIGALE
    host190102.parse_cigale(
        os.path.join(db_path, 'CRAFT', 'Bhandari2019', 'HG190102_CIGALE.fits'))

    # Vet all
    host190102.vet_all()

    # Write -- BUT DO NOT ADD TO REPO (YET)
    path = resource_filename('frb', 'data/Galaxies/{}'.format(frbname))
    host190102.write_to_json(path=path)
Code Example #34
File: ast.py  Project: cs362sp16/cs562w16
            random.choice(ascii_uppercase) for i in range(1)))
        random_int = random.randint(0, 9)
        random_int2 = random.randint(0, 9)
        random_int3 = random.randint(0, 9)

        t.add_column(
            Column(data=[random_int, random_int2, random_int3],
                   name=Random_column_Name))

        print(t)

        print('Pass test for adding column: ', t[Random_column_Name])
    return


t = Table()


def addingcolfix():

    #t.add_column(Column(data = [1,2,3], name = 'ssdsd' ), rename_duplicate = True)
    for i in range(1, 10):
        Random_column_Unit = (''.join(
            random.choice(ascii_uppercase) for i in range(1)))
        Random_column_Name = (''.join(
            random.choice(ascii_uppercase) for i in range(1)))
        Random_description = (''.join(
            random.choice(ascii_uppercase) for i in range(1)))
        random_int = random.randint(0, 9)
        random_int2 = random.randint(0, 9)
        random_int3 = random.randint(0, 9)
Code Example #35
File: build_hosts.py  Project: lao19881213/FRB
def build_host_181112(build_photom=False):
    """ Build the host galaxy data for FRB 181112

    All of the data comes from Prochaska+2019, Science

    Args:
        build_photom (bool, optional):

    """
    frbname = '181112'
    FRB_coord = SkyCoord('J214923.63-525815.39',
                         unit=(units.hourangle,
                               units.deg))  # Cherie;  2019-04-17 (Slack)
    # Coord from DES
    Host_coord = SkyCoord('J214923.66-525815.28',
                          unit=(units.hourangle, units.deg))  # from DES

    # Instantiate
    host181112 = frbgalaxy.FRBHost(Host_coord.ra.value, Host_coord.dec.value,
                                   frbname)
    host181112.frb_coord = FRB_coord

    # Redshift
    host181112.set_z(0.4755, 'spec', err=7e-5)

    # ############
    # Photometry

    # DES
    # Grab the table (requires internet)
    search_r = 2 * units.arcsec
    des_srvy = des.DES_Survey(Host_coord, search_r)
    des_tbl = des_srvy.get_catalog(print_query=True)

    host181112.parse_photom(des_tbl)

    # VLT -- Lochlan 2019-05-02
    # VLT -- Lochlan 2019-06-18
    photom_file = os.path.join(db_path, 'CRAFT', 'Prochaska2019',
                               'prochaska2019_photom.ascii')
    if build_photom:
        photom = Table()
        photom['Name'] = ['HG{}'.format(frbname)]
        photom['ra'] = host181112.coord.ra.value
        photom['dec'] = host181112.coord.dec.value
        photom['VLT_g'] = 22.57
        photom['VLT_g_err'] = 0.04
        photom['VLT_I'] = 21.51
        photom['VLT_I_err'] = 0.04
        # Add in DES
        for key in host181112.photom.keys():
            photom[key] = host181112.photom[key]
        # Merge/write
        photom = frbphotom.merge_photom_tables(photom, photom_file)
        photom.write(photom_file,
                     format=frbphotom.table_format,
                     overwrite=True)
    host181112.parse_photom(
        Table.read(photom_file, format=frbphotom.table_format))

    # Nebular lines
    host181112.parse_ppxf(
        os.path.join(db_path, 'CRAFT', 'Prochaska2019',
                     'HG181112_FORS2_ppxf.ecsv'))

    # Adjust errors on Ha, [NII] because of telluric

    # Derived quantities
    host181112.calc_nebular_AV('Ha/Hb', min_AV=0.)

    # Ha is tough in telluric
    host181112.calc_nebular_SFR('Hb', AV=0.15)  # Photometric
    # This would be an upper limit
    #host.calc_nebular_SFR('Ha')

    # CIGALE
    host181112.parse_cigale(
        os.path.join(db_path, 'CRAFT', 'Prochaska2019',
                     'HG181112_CIGALE.fits'))

    # Write
    path = resource_filename('frb', 'data/Galaxies/{}'.format(frbname))
    host181112.write_to_json(path=path)
Code Example #36
File: build_hosts.py  Project: lao19881213/FRB
def build_host_190523(
        build_photom=False):  #:run_ppxf=False, build_photom=False):
    """
    Build the host galaxy data for FRB 190523

    Most of the data is from Ravi+2019
        https://ui.adsabs.harvard.edu/abs/2019Natur.572..352R/abstract

    The exception is that CRAFT (S. Simha) have run CIGALE on the photometry for
    a consistent analysis with the ASKAP hosts.


    Args:
        build_photom:

    Returns:

    """
    frbname = '190523'
    gal_coord = SkyCoord(ra=207.06433, dec=72.470756, unit='deg')

    # Instantiate
    host190523 = frbgalaxy.FRBHost(gal_coord.ra.value, gal_coord.dec.value,
                                   frbname)

    # Load redshift table
    host190523.set_z(0.660, 'spec')

    # Morphology

    # Photometry

    # PanStarrs
    # Grab the table (requires internet)
    photom_file = os.path.join(db_path, 'DSA', 'Ravi2019',
                               'ravi2019_photom.ascii')
    if build_photom:
        search_r = 1 * units.arcsec
        ps_srvy = panstarrs.Pan_STARRS_Survey(gal_coord, search_r)
        ps_tbl = ps_srvy.get_catalog(print_query=True)
        photom = frbphotom.merge_photom_tables(ps_tbl, photom_file)
        photom.write(photom_file,
                     format=frbphotom.table_format,
                     overwrite=True)
    # Parse
    host190523.parse_photom(
        Table.read(photom_file, format=frbphotom.table_format))

    # PPXF
    '''
    if run_ppxf:
        results_file = os.path.join(db_path, 'CRAFT', 'Bhandari2019', 'HG190608_SDSS_ppxf.ecsv')
        meta, spectrum = host190608.get_metaspec(instr='SDSS')
        spec_fit = None
        ppxf.run(spectrum, 2000., host190608.z, results_file=results_file, spec_fit=spec_fit, chk=True)
    host190608.parse_ppxf(os.path.join(db_path, 'CRAFT', 'Bhandari2019', 'HG190608_SDSS_ppxf.ecsv'))
    '''

    # CIGALE -- PanStarrs photometry but our own CIGALE analysis
    host190523.parse_cigale(
        os.path.join(db_path, 'DSA', 'Ravi2019', 'S1_190523_CIGALE.fits'))

    # Derived quantities
    host190523.derived['SFR_nebular'] = 1.3
    host190523.derived['SFR_nebular_err'] = -999.

    # Vet all
    host190523.vet_all()

    # Write -- BUT DO NOT ADD TO REPO (YET)
    path = resource_filename('frb', 'data/Galaxies/{}'.format(frbname))
    host190523.write_to_json(path=path)
Code Example #37
File: build_hosts.py  Project: lao19881213/FRB
def build_host_180924(build_photom=True):
    """
    Generate the JSON file for FRB 180924
    
    All data are from Bannister et al. 2019
        https://ui.adsabs.harvard.edu/abs/2019Sci...365..565B/abstract

    Writes to 180924/FRB180924_host.json

    Args:
        build_photom (bool, optional): Generate the photometry file in the Galaxy_DB
    """
    frbname = '180924'
    gal_coord = SkyCoord('J214425.25-405400.81',
                         unit=(units.hourangle, units.deg))

    # Instantiate
    host = frbgalaxy.FRBHost(gal_coord.ra.value, gal_coord.dec.value, '180924')

    # Redshift -- JXP measured from multiple data sources
    host.set_z(0.3212, 'spec')

    # Morphology
    host.parse_galfit(
        os.path.join(db_path, 'CRAFT', 'Bannister2019',
                     'HG180924_DES_galfit.log'), 0.263)

    # Photometry
    # DES
    search_r = 2 * units.arcsec
    des_srvy = des.DES_Survey(gal_coord, search_r)
    des_tbl = des_srvy.get_catalog(print_query=True)
    host.parse_photom(des_tbl)

    # Grab the table (requires internet)
    photom_file = os.path.join(db_path, 'CRAFT', 'Bannister2019',
                               'bannister2019_photom.ascii')
    if build_photom:
        photom = Table()
        photom['Name'] = ['HG{}'.format(frbname)]
        photom['ra'] = host.coord.ra.value
        photom['dec'] = host.coord.dec.value
        photom['VLT_g'] = 21.38
        photom['VLT_g_err'] = 0.04
        photom['VLT_I'] = 20.10
        photom['VLT_I_err'] = 0.02
        # Add in DES
        for key in host.photom.keys():
            photom[key] = host.photom[key]
        # Merge/write
        photom = frbphotom.merge_photom_tables(photom, photom_file)
        photom.write(photom_file,
                     format=frbphotom.table_format,
                     overwrite=True)

    # Parse
    host.parse_photom(Table.read(photom_file, format=frbphotom.table_format))
    #host.parse_photom(des_tbl)

    # PPXF
    host.parse_ppxf(
        os.path.join(db_path, 'CRAFT', 'Bannister2019',
                     'HG180924_MUSE_ppxf.ecsv'))

    # Derived quantities

    # AV
    host.calc_nebular_AV('Ha/Hb')

    # SFR
    host.calc_nebular_SFR('Ha')
    host.derived['SFR_nebular_err'] = -999.

    # CIGALE
    host.parse_cigale(
        os.path.join(db_path, 'CRAFT', 'Bannister2019',
                     'HG180924_CIGALE.fits'), 0.263)

    # Vet all
    host.vet_all()

    # Write
    path = resource_filename('frb', 'data/Galaxies/180924')
    host.write_to_json(path=path)
Code Example #38
File: build_hosts.py  Project: lao19881213/FRB
def build_host_121102(build_photom=False):
    """
    Generate the JSON file for FRB 121102

    Writes to 121102/FRB121102_host.json

    All of the data currently comes from Tendulkar et al. 2017

    Args:
        build_photom (bool, optional): Generate the photometry file in the Galaxy_DB

    """
    FRB_coord = SkyCoord('05h31m58.698s +33d8m52.59s', frame='icrs')
    # Eyeball Tendulkar+17 PA
    gal_coord = FRB_coord.directional_offset_by(-45 * units.deg,
                                                286e-3 * units.arcsec)

    # Instantiate
    host121102 = frbgalaxy.FRBHost(gal_coord.ra.value, gal_coord.dec.value,
                                   '121102')

    # Redshift
    host121102.set_z(0.19273, 'spec', err=0.00008)

    # Photometry -- Tendulkar 2017
    photom_file = os.path.join(db_path, 'Repeater', 'Tendulkar2017',
                               'tendulkar2017_photom.ascii')
    if build_photom:
        photom = Table()
        #photom['Name'] = ['HG121102']  DO NOT USE str columns!
        photom['ra'] = [host121102.coord.ra.value]
        photom['dec'] = host121102.coord.dec.value
        #
        photom['GMOS_r'] = 25.1
        photom['GMOS_r_err'] = 0.1
        photom['GMOS_i'] = 23.9
        photom['GMOS_i_err'] = 0.1
        # Write
        photom = frbphotom.merge_photom_tables(photom, photom_file)
        photom.write(photom_file,
                     format=frbphotom.table_format,
                     overwrite=True)
    host121102.parse_photom(
        Table.read(photom_file, format=frbphotom.table_format))

    # Nebular lines
    neb_lines = {}
    neb_lines['Halpha'] = 0.652e-16
    neb_lines['Halpha_err'] = 0.009e-16
    neb_lines['Halpha_Al'] = 0.622
    #
    neb_lines['Hbeta'] = 0.118e-16
    neb_lines['Hbeta_err'] = 0.011e-16
    neb_lines['Hbeta_Al'] = 0.941
    #
    neb_lines['[OIII] 5007'] = 0.575e-16
    neb_lines['[OIII] 5007_err'] = 0.011e-16
    neb_lines['[OIII] 5007_Al'] = 0.911
    #
    neb_lines[
        '[NII] 6584'] = 0.030e-16  # * units.erg/units.cm**2/units.s      # Upper limit
    neb_lines[
        '[NII] 6584_err'] = -999.  # * units.erg/units.cm**2/units.s      # Upper limit
    neb_lines['[NII] 6584_Al'] = 0.619

    AV = 2.42

    # Extinction correct
    for key in neb_lines.keys():
        if '_err' in key:
            continue
        if 'Al' in key:
            continue
        # Ingest
        host121102.neb_lines[key] = neb_lines[key] * 10**(
            neb_lines[key + '_Al'] * AV / 2.5)
        if neb_lines[key + '_err'] > 0:
            host121102.neb_lines[key +
                                 '_err'] = neb_lines[key + '_err'] * 10**(
                                     neb_lines[key + '_Al'] * AV / 2.5)
        else:
            host121102.neb_lines[key + '_err'] = neb_lines[key + '_err']

    # Vette
    for key in host121102.neb_lines.keys():
        if '_err' in key:
            continue
        assert key in defs.valid_neb_lines

    # Morphology
    host121102.morphology['reff_ang'] = 0.41
    host121102.morphology['reff_ang_err'] = 0.06
    #
    host121102.morphology['n'] = 2.2
    host121102.morphology['n_err'] = 1.5
    #
    host121102.morphology['b/a'] = 0.25
    host121102.morphology['b/a_err'] = 0.13

    # Derived quantities
    host121102.derived['M_r'] = -17.0  # AB; Tendulkar+17
    host121102.derived['M_r_err'] = 0.2  # Estimated by JXP
    host121102.derived['SFR_nebular'] = 0.23  # MSun/yr; Tendulkar+17
    host121102.derived['Mstar'] = 5.5e7  # Msun; Tendulkar+17
    host121102.derived['Mstar_err'] = 1.5e7  # Msun; Tendulkar+17
    host121102.derived[
        'Z_spec'] = -0.16  # Tendulkar+17 on a scale with Solar O/H = 8.86
    host121102.derived['Z_spec_err'] = -999.  # Tendulkar+17

    # Vet
    assert host121102.vet_all()

    # Write
    path = resource_filename('frb', 'data/Galaxies/121102')
    host121102.write_to_json(path=path, overwrite=True)
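The extinction correction applied in build_host_121102 is the standard F_corr = F_obs * 10**(A_lambda / 2.5), with A_lambda = (A_lambda/A_V) * A_V. A one-line numeric check using the Halpha numbers quoted above:

flux_obs = 0.652e-16     # observed Halpha flux from the dictionary above
Al_over_AV = 0.622       # Halpha_Al
AV = 2.42

flux_corr = flux_obs * 10 ** (Al_over_AV * AV / 2.5)
print(flux_corr)         # ~2.6e-16, the dust-corrected line flux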