Example #1
def amplitude_of_best_fit_greybody(Trf = None, b = 2.0, Lrf = None, zin = None):
	'''
	Same as single_simple_flux_from_greybody, but used to make an amplitude lookup table
	'''

	nsed = 1e4
	lambda_mod = loggen(1e3, 8.0, nsed) # microns
	nu_mod = c * 1.e6/lambda_mod # Hz

	#cosmo = Planck15#(H0 = 70.5 * u.km / u.s / u.Mpc, Om0 = 0.273)
	conversion = 4.0 * np.pi *(1.0E-13 * cosmo.luminosity_distance(zin) * 3.08568025E22)**2.0 / L_sun # 4 * pi * D_L^2    units are L_sun/(Jy x Hz)

	Lir = Lrf / conversion # Jy x Hz

	Ain = 1.0e-36 #good starting parameter
	betain =  b
	alphain=  2.0

	fit_params = Parameters()
	fit_params.add('Ain', value= Ain)

	#THE LM FIT IS HERE
	Pfin = minimize(sedint, fit_params, args=(nu_mod,Lir.value,Trf/(1.+zin),b,alphain))
	#pdb.set_trace()
	return Pfin.params['Ain'].value
Example #2
def simple_flux_from_greybody(lambdavector, Trf = None, b = None, Lrf = None, zin = None, ngal = None):
	'''
	Return flux densities at any wavelength of interest (in the range 1-10000 micron),
	assuming a galaxy (at given redshift) graybody spectral energy distribution (SED),
	with a power law replacing the Wien part of the spectrum to account for the
	variability of dust temperatures within the galaxy. The two different functional
	forms are stitched together by imposing that the two functions and their first
	derivatives coincide. The code contains the nitty-gritty details explicitly.

	Inputs:
	alphain = spectral index of the power law replacing the Wien part of the spectrum, to account for the variability of dust temperatures within a galaxy [default = 2; see Blain 1999 and Blain et al. 2003]
	betain = spectral index of the emissivity law for the graybody [default = 2; see Hildebrand 1985]
	Trf = rest-frame temperature [in K; default = 20K]
	Lrf = rest-frame FIR bolometric luminosity [in L_sun; default = 10^10]
	zin = galaxy redshift [default = 0.001]
	lambdavector = array of wavelengths of interest [in microns; default = (24, 70, 160, 250, 350, 500)];

	AUTHOR:
	Lorenzo Moncelsi [[email protected]]

	HISTORY:
	20June2012: created in IDL
	November2015: converted to Python
	'''

	nwv = len(lambdavector)
	nuvector = c * 1.e6 / lambdavector # Hz

	nsed = 1e4
	lambda_mod = loggen(1e3, 8.0, nsed) # microns
	nu_mod = c * 1.e6/lambda_mod # Hz

	#Lorenzo's version had: H0=70.5, Omega_M=0.274, Omega_L=0.726 (Hinshaw et al. 2009)
	#cosmo = Planck15#(H0 = 70.5 * u.km / u.s / u.Mpc, Om0 = 0.273)
	conversion = 4.0 * np.pi *(1.0E-13 * cosmo.luminosity_distance(zin) * 3.08568025E22)**2.0 / L_sun # 4 * pi * D_L^2    units are L_sun/(Jy x Hz)

	Lir = Lrf / conversion # Jy x Hz

	Ain = np.zeros(ngal) + 1.0e-36 #good starting parameter
	betain =  np.zeros(ngal) + b
	alphain=  np.zeros(ngal) + 2.0

	fit_params = Parameters()
	fit_params.add('Ain', value= Ain)
	#fit_params.add('Tin', value= Trf/(1.+zin), vary = False)
	#fit_params.add('betain', value= b, vary = False)
	#fit_params.add('alphain', value= alphain, vary = False)

	#pdb.set_trace()
	#THE LM FIT IS HERE
	#Pfin = minimize(sedint, fit_params, args=(nu_mod,Lir.value,ngal))
	Pfin = minimize(sedint, fit_params, args=(nu_mod,Lir.value,ngal,Trf/(1.+zin),b,alphain))

	#pdb.set_trace()
	flux_mJy=sed(Pfin.params,nuvector,ngal,Trf/(1.+zin),b,alphain)

	return flux_mJy
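
The docstring above describes stitching the greybody to a Wien-side power law by requiring the two functions and their first derivatives to agree; the helper routines it relies on (loggen, sedint, sed) are not shown in these examples. The following is only a minimal, self-contained sketch of that stitching condition under the usual modified-blackbody assumption S_nu proportional to nu^beta * B_nu(T), not the author's implementation.

import numpy as np
from scipy.constants import h, k, c   # Planck, Boltzmann, speed of light (SI)
from scipy.optimize import brentq

def stitched_greybody(nu, A, T, beta=2.0, alpha=2.0):
    """Greybody A * nu^(3+beta) / (exp(h nu / k T) - 1) joined to a nu^-alpha
    power law so that value and first derivative are continuous."""
    x = h * nu / (k * T)
    grey = A * nu**(3.0 + beta) / np.expm1(x)
    # logarithmic slope of the greybody, (3+beta) - x e^x/(e^x - 1), must equal -alpha
    match = lambda xc: (3.0 + beta) + alpha - xc * np.exp(xc) / np.expm1(xc)
    x_c = brentq(match, 0.1, 100.0)          # transition point in x = h nu / (k T)
    nu_c = x_c * k * T / h
    powerlaw = A * nu_c**(3.0 + beta) / np.expm1(x_c) * (nu / nu_c)**(-alpha)
    return np.where(nu < nu_c, grey, powerlaw)

# Illustrative use: a T = 20 K, beta = 2 SED sampled from 8 to 1000 micron
lam_um = np.logspace(np.log10(8.0), 3.0, 500)
nu_hz = c / (lam_um * 1e-6)
model = stitched_greybody(nu_hz, A=1.0e-36, T=20.0, beta=2.0, alpha=2.0)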
Example #3
def fast_Lir(m,zin): #Tin,betain,alphain,z):
  '''I don't know how to do this yet'''
  wavelength_range = np.linspace(8., 1000., 10 * 992)
  model_sed = fast_sed(m,wavelength_range)

  nu_in = c * 1.e6 / wavelength_range
  ns = len(nu_in)

  dnu = nu_in[0:ns-1] - nu_in[1:ns]
  dnu = np.append(dnu[0],dnu)
  Lir = np.sum(model_sed * dnu, axis=1)
  conversion = 4.0 * np.pi *(1.0E-13 * cosmo.luminosity_distance(zin) * 3.08568025E22)**2.0 / L_sun # 4 * pi * D_L^2    units are L_sun/(Jy x Hz)

  Lrf = Lir * conversion # Jy x Hz
  return Lrf
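
fast_Lir approximates the integral of the model SED over frequency with a left-edge Riemann sum built from dnu. As a cross-check, the same 8-1000 micron integral can be written with a trapezoidal rule; this sketch assumes, as the axis=1 above implies, that model_sed has shape (ngal, n_wavelengths) on the wavelength_range grid. The placeholder SED here is illustrative only.

import numpy as np
from scipy.constants import c          # speed of light, m/s
from scipy.integrate import trapezoid

wavelength_range = np.linspace(8., 1000., 9920)      # microns
model_sed = np.ones((2, wavelength_range.size))      # stand-in for fast_sed(m, wavelength_range), Jy

nu_in = c * 1.e6 / wavelength_range                  # Hz, monotonically decreasing
Lir = np.abs(trapezoid(model_sed, x=nu_in, axis=1))  # Jy x Hz; abs() because nu decreases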
Example #4
def fast_double_Lir(m,zin): #Tin,betain,alphain,z):
  '''I don't know how to do this yet'''
  wavelength_range = np.linspace(8., 1000., 10 * 992)

  v = m.valuesdict()
  betain = np.asarray(v['beta'])
  alphain = np.asarray(v['alpha'])
  A_hot= np.asarray(v['A_hot'])
  A_cold= np.asarray(v['A_cold'])
  T_hot = np.asarray(v['T_hot'])
  T_cold = np.asarray(v['T_cold'])

  #Hot
  p_hot = Parameters()
  p_hot.add('A', value = A_hot, vary = True)
  p_hot.add('T_observed', value = T_hot, vary = True)
  p_hot.add('beta', value = betain, vary = False)
  p_hot.add('alpha', value = alphain, vary = False)
  hot_sed = fast_sed(p_hot,wavelength_range)

  #Cold
  p_cold = Parameters()
  p_cold.add('A', value = A_cold, vary = True)
  p_cold.add('T_observed', value = T_cold, vary = True)
  p_cold.add('beta', value = betain, vary = False)
  p_cold.add('alpha', value = alphain, vary = False)
  cold_sed = fast_sed(p_cold,wavelength_range)

  nu_in = c * 1.e6 / wavelength_range
  ns = len(nu_in)

  dnu = nu_in[0:ns-1] - nu_in[1:ns]
  dnu = np.append(dnu[0],dnu)
  Lir_hot = np.sum(hot_sed * dnu, axis=1)
  Lir_cold = np.sum(cold_sed * dnu, axis=1)
  conversion = 4.0 * np.pi *(1.0E-13 * cosmo.luminosity_distance(zin) * 3.08568025E22)**2.0 / L_sun # 4 * pi * D_L^2    units are L_sun/(Jy x Hz)

  Lrf_hot = Lir_hot * conversion # Jy x Hz
  Lrf_cold = Lir_cold * conversion # Jy x Hz
  return [Lrf_hot, Lrf_cold]
Example #5
def get_params_string(tag):
    p=read_tag(tag)
    mtot=float(p["M"])
    q=float(p["q"])
    snr=0
    if("snr" in p["z"]):
        d=0;
    else:
        d=float(cosmo.luminosity_distance(float(p["z"]))/units.Mpc)
    inc=math.pi/float(p["inc"])
    m1=mtot/(1.0+1.0/q)
    m2=mtot/(1.0+q)
    t0=0
    phi0=math.pi/3.0
    beta=math.pi/3.0
    lamb=3.0*math.pi/4.0
    pol=math.pi/3.0
    val={"m1":m1,"m2":m2,"tRef":t0,"phiRef":phi0,"distance":d,"lambda":lamb,"beta":beta,"inclination":inc,"polarization":pol}
    s=""
    for par in flare_submit.parnames:
        s=s+str(val[par])+" "
    return s
    def set_redshift(self):
        """
        Function which takes the input desired observation redshift and
        cosmologically redshifts the source SED. This sets the redshifted_model
        which is what is needed for making observations.
        """

        z = self.z
        # Deepcopy the model to local variable to get proper behavior of model
        # object methods.
        redshifted_model = deepcopy(self.transient_model)
        # Compute the luminosity distance using Planck15 cosmological
        # parameters, by which to decrease the amplitude of the SED
        lumdist = cosmo.luminosity_distance(z).value * 1e6  # in pc
        # SED is assumed to be at 10pc so scale accordingly.
        amp = pow(np.divide(10.0, lumdist), 2)
        # Set the redshift of the SED, this stretches the wavelength
        # distribution.
        redshifted_model.set(z=z)
        # Separately set the amplitude, this rescales the flux at each
        # wavelength.
        redshifted_model.set(amplitude=amp)
        self.redshifted_model = redshifted_model
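
set_redshift dims an SED that is normalized at 10 pc by the factor (10 pc / d_L)^2. A quick standalone check of that amplitude factor, independent of the transient_model object (which is not shown here), assuming the Planck15 cosmology mentioned in the comments:

import numpy as np
from astropy.cosmology import Planck15 as cosmo

z = 0.05
lumdist_pc = cosmo.luminosity_distance(z).value * 1e6   # Mpc -> pc
amp = np.divide(10.0, lumdist_pc) ** 2                  # dimming relative to 10 pc
print(lumdist_pc, amp)                                  # ~2.3e8 pc, amp ~ 1.9e-15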
sp_nodust = fsps.StellarPopulation(zcontinuous=1,
                                   add_dust_emission=False,
                                   logzsol=Z,
                                   sfh=3,
                                   dust_type=2,
                                   dust2=0)

sp_nodust.set_tabular_sfh(agelims, sfrs)

wav, lum_hz_permass = sp_nodust.get_spectrum(tage=agelims.max())

lum_hz = lum_hz_permass * (10**mass)

W_hz = lum_hz * 3.827e26  #now W/Hz
z = 0.01
dl = Planck15.luminosity_distance(z)
dl = dl.to(u.m)

conversion = (1. / (4. * np.pi * (dl.value)**2)) * (1.0e26)
to_flux = W_hz * conversion  #now Jy
to_maggies = to_flux / 3631.

stellar_spec = to_maggies
obs_spec = spec

vband_idx = find_nearest(wav, 3000)

vband_extinction = obs_spec[vband_idx] / stellar_spec[vband_idx]

extinction = np.zeros(len(spec))
for i in range(spec.shape[0]):
Example #8
from load_lc import get_uv_lc


def round_sig(x, sig=2):
    print(x)
    if x < 0:
        return -round(-x, sig - int(floor(log10(-x))) - 1)
    return round(x, sig - int(floor(log10(x))) - 1)


def ndec(num):
    dec = str(num).split('.')[-1]
    return len(dec)


d = Planck15.luminosity_distance(z=0.03154).cgs.value

headings = np.array([
    'Date (JD)', '$\Delta t$', 'Instrument', 'Filter', 'AB Mag',
    'Error in AB Mag'
])
label = "uvot-phot"
caption = "Optical and ultraviolet photometry for SN2018gep"

# Print the table headers
ncol = len(headings)
colstr = ""
colstr += 'l'
for col in np.arange(ncol - 1):
    colstr += "r"
print(colstr)
Example #9
def redshift_to_luminosity_distance(redshift):
    return Planck15.luminosity_distance(redshift).value
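
A short usage note on the wrapper above: Planck15.luminosity_distance returns an astropy Quantity in Mpc, and .value strips the unit, so the function hands back a plain float in Mpc.

from astropy.cosmology import Planck15

def redshift_to_luminosity_distance(redshift):    # as defined above
    return Planck15.luminosity_distance(redshift).value

print(Planck15.luminosity_distance(0.1))          # about 475 Mpc, as a Quantity
print(redshift_to_luminosity_distance(0.1))       # the same number as a bare float (Mpc)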
Example #10
def get_luminosity_old(
    simulation_data,
    unit_density,
    unit_length,
    unit_time,
    redshift,
    beam_FWHM_arcsec,
    ntracers,
    q=2.2,
    gamma_c=4.0 / 3.0,
    eta=0.1,
    freq=1.4 * _u.GHz,
    prs_scale=1e-11 * _u.Pa,
    vol_scale=(1 * _u.kpc)**3,
    alpha=0.6,
    tracer_threshold=1.0e-7,
    tracer_effective_zero=1e-10,
    radio_cell_volumes=None,
    radio_cell_areas=None,
    L0=None,
    calculate_luminosity=True,
    convolve_flux=False,
):

    # units
    unit_mass = (unit_density * (unit_length**3)).to(_u.kg)
    unit_pressure = unit_mass / (unit_length * unit_time**2)

    # distance information and conversions
    Dlumin = _cosmo.luminosity_distance(redshift)
    kpc_per_arcsec = _cosmo.kpc_proper_per_arcmin(redshift).to(_u.kpc /
                                                               _u.arcsec)

    # simulation data
    if radio_cell_volumes is None:
        radio_cell_volumes = _ps.calculate_cell_volume(simulation_data)
    if radio_cell_areas is None:
        radio_cell_areas = _ps.calculate_cell_area(simulation_data)

    # in physical units
    radio_cell_areas_physical = radio_cell_areas * unit_length**2
    radio_cell_volumes_physical = radio_cell_volumes * unit_length**3

    # pressure in physical units
    radio_prs_scaled = simulation_data.prs * unit_pressure

    # luminosity scaling
    if L0 is None:
        L0 = get_L0(q, gamma_c, eta, freq=freq, prs=prs_scale, vol=vol_scale)

    # beam information
    sigma_beam_arcsec = beam_FWHM_arcsec / 2.355
    area_beam_kpc2 = (_np.pi * (sigma_beam_arcsec * kpc_per_arcsec)**2).to(
        _u.kpc**2)

    # n beams per cell
    n_beams_per_cell = (radio_cell_areas_physical / area_beam_kpc2).si

    (radio_tracer_mask, clamped_tracers,
     radio_combined_tracers) = clamp_tracers(simulation_data, ntracers,
                                             tracer_threshold,
                                             tracer_effective_zero)

    radio_luminosity_tracer_weighted = None
    if calculate_luminosity is True:
        radio_luminosity = (L0 *
                            (radio_prs_scaled / prs_scale)**((q + 5.0) / 4.0) *
                            radio_cell_volumes_physical / vol_scale).to(_u.W /
                                                                        _u.Hz)
        radio_luminosity_tracer_weighted = (radio_luminosity *
                                            radio_tracer_mask *
                                            clamped_tracers)

    flux_const_term = (L0 / (4 * _np.pi *
                             (Dlumin**2))) * ((1 + redshift)**(1 + alpha))
    flux_prs_term = (radio_prs_scaled / prs_scale)**((q + 5.0) / 4.0)
    flux_vol_term = radio_cell_volumes_physical / vol_scale
    flux_beam_term = 1 / n_beams_per_cell
    flux_density = (flux_const_term * flux_prs_term * flux_vol_term *
                    flux_beam_term).to(_u.Jy)
    flux_density_tracer_weighted = flux_density * radio_tracer_mask * clamped_tracers

    if convolve_flux is True:
        beam_kernel = _Gaussian2DKernel(
            (sigma_beam_arcsec * kpc_per_arcsec).to(_u.kpc).value)
        flux_density_tracer_weighted = (
            _convolve(flux_density_tracer_weighted.to(_u.Jy),
                      beam_kernel,
                      boundary="extend") * _u.Jy)

    return (radio_luminosity_tracer_weighted, flux_density_tracer_weighted)
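
The flux-density expression above folds in the (1 + z)^(1 + alpha) K-correction used for a power-law radio spectrum, i.e. S_nu = L_nu (1 + z)^(1 + alpha) / (4 pi D_L^2) with the sign convention of the alpha = 0.6 default. A standalone sanity check of just that conversion (the L0, pressure, volume and beam terms of the function are omitted; the luminosity value is illustrative):

import numpy as np
import astropy.units as u
from astropy.cosmology import Planck15 as cosmo

z, alpha = 0.1, 0.6
L_nu = 1e26 * u.W / u.Hz                       # assumed monochromatic radio luminosity
D_L = cosmo.luminosity_distance(z)
S_nu = (L_nu * (1 + z)**(1 + alpha) / (4 * np.pi * D_L**2)).to(u.Jy)
print(S_nu)                                    # ~4.3 Jy for these numbers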
Example #11
    dx0 = np.diff(lamb)[0]
    dx = np.diff(xs)[0]
    dx2 = 0.5*dx
    mags = table[i,7:]
    Ix = xs[(xs>xi - dx2)&(xs<xf + dx2)]
    IF = mags[(xs>xi - dx2)&(xs<xf + dx2)]
    F_in = 10**((IF+48)/-2.5)
#    F_in /= (1+table[i,2])
    for c in range(len(Ix)):
        if c == 0:
            if (0<(Ix[c]- xi)<dx2):
                F_in[c] *= ((dx2 - abs(Ix[c]- xi))/dx)
            elif (0<(xi - Ix[c])<dx2):
                F_in[c] *= ((dx2 + abs(Ix[c]- xi))/dx)
        if c == len(Ix) - 1:
            if (0<(xf - Ix[c])<dx2):
                F_in[c] *= ((dx2 - abs(Ix[c]- xf))/dx)
            elif (0<(Ix[c] - xf) < dx2):
                F_in[c] *= ((dx2 + abs(Ix[c]- xf))/dx)
    P_B.append(np.sum(F_in*dx)/dx0)
PB = np.array(P_B)
PAUS_BLUE = PB[(table[:,2]<0.9)&(table[:,2]>0.11)]
new_table = table[:,(0,1,2,3,4,5,6)][(table[:,2]<0.9)&(table[:,2]>0.11)]
mB = -2.5*np.log10(PAUS_BLUE) - 48
MB = mB - 25 - 5*np.log10(cosmo.luminosity_distance(new_table[:,2]).value)
new_table = np.vstack([new_table.T,MB]).T
t = Table(new_table,names=['RA','DEC','Z','m_i','M_I','FHalpha','SFR','MB'])
print "saving data from "+str(len(MB))+" of " +str(i) + "galaxies in fits format... at /cosma/home/durham/jarmijo/PAU_test/catalogs/mocks_MBlue.fits\n"
t.write('/cosma/home/dp004/dc-armi2/PAU_test/catalogs/mocks_radecz_MIMB_SFRHaplha_'+icut+'cut.fits',format='fits')
print "end of program.\n"
Example #12
def dump_ppxf_results(ppfit, miles, z, outfile):
    """
    Write the standard results and the
    gas_component measurements to a simple ASCII file

    Parameters
    ----------
    ppfit: ppxf
    outfile: str

    Returns
    -------

    """
    # Get the lines (air)
    emission_lines, line_names, line_wave = util.emission_lines(
        np.array([0.1, 0.2]), [1000., 1e5],
        0,
        limit_doublets=False,
        vacuum=True)
    # Construct a simple Table
    gas_tbl = Table()

    # Standard pPXF fit results
    meta = {}
    meta['EBV'] = ppfit.reddening

    star_weights = ppfit.weights[~ppfit.gas_component]
    star_weights = star_weights.reshape(ppfit.reg_dim)
    age, metals = miles.mean_age_metal(star_weights)
    meta['AGE'] = age
    meta['METALS'] = metals

    # Mass -- Approximate
    # Mass -- This is a bit approximate as Dwv is a guess for now
    actualflux = ppfit.bestfit * constants.L_sun.cgs / units.angstrom / (
        4 * np.pi * (cosmo.luminosity_distance(z).to(units.cm))**2 / (1 + z))
    # When fitting, the routine thought our data and model spectra had same units...
    Dwv = 1700.  # Ang, width of the band pass
    scfactor = np.median(ppfit.bestfit *
                         (units.erg / units.s / units.cm**2 / units.angstrom) /
                         actualflux) * Dwv
    # To get the actual model mass required to fit spectrum, scale by this ratio
    massmodels = scfactor * miles.total_mass(star_weights)
    meta['LOGMSTAR'] = np.log10(massmodels.value)

    gas_tbl.meta = meta

    gas = ppfit.gas_component
    comp = ppfit.component[gas]
    gas_tbl['comp'] = comp
    gas_tbl['name'] = ppfit.gas_names
    gas_tbl['flux'] = ppfit.gas_flux
    gas_tbl['err'] = ppfit.gas_flux_error

    # Wavelengths
    waves = []
    for name in ppfit.gas_names:
        idx = np.where(line_names == name)[0][0]
        waves.append(line_wave[idx])
    gas_tbl['wave'] = waves

    vs = [ppfit.sol[icomp][0] for icomp in comp]
    sigs = [ppfit.sol[icomp][1] for icomp in comp]
    gas_tbl['v'] = vs
    gas_tbl['sig'] = sigs
    # Write
    gas_tbl.write(outfile, format='ascii.ecsv', overwrite=True)
    print("Wrote: {:s}".format(outfile))
Example #13
def ujy_to_flux(ujy, z):
    d = Planck15.luminosity_distance(z=z).cgs.value
    return ujy * 1E-6 * 1E-23 * 4 * np.pi * d**2
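
Despite its name, ujy_to_flux returns a spectral luminosity: 1 uJy = 1e-6 x 1e-23 erg/s/cm^2/Hz, and multiplying by 4 pi d^2 (d in cm) gives erg/s/Hz. A small usage check, repeating the one-line definition so the snippet runs on its own:

import numpy as np
from astropy.cosmology import Planck15

def ujy_to_flux(ujy, z):
    d = Planck15.luminosity_distance(z=z).cgs.value
    return ujy * 1E-6 * 1E-23 * 4 * np.pi * d**2

print(ujy_to_flux(50.0, 0.014))   # ~2.3e26 erg/s/Hz for 50 uJy at z = 0.014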
Example #14
    names, ra, dec, dates, z = get_transients()

    # initialize plot
    fig, axarr = plt.subplots(2, 1, figsize=(5, 6))

    # redshift distribution of all of them
    choose = z > 0
    axarr[0].hist(z[choose], histtype='step', color='k')
    ax = axarr[0]
    ax.set_xlabel("Redshift", fontsize=14)
    ax.set_ylabel("\# Transients", fontsize=14)
    ax.tick_params(axis='both', labelsize=14)
    ax.axvline(x=0.014, c='k', ls='--', lw=2.0)

    # distribution of peak flux at 230 GHz
    dist = Planck15.luminosity_distance(z=z[choose]).cgs.value
    ref = Planck15.luminosity_distance(z=0.014).cgs.value
    flux = 50e3 * (ref / dist)**2
    ax = axarr[1]
    ax.hist(flux,
            histtype='step',
            color='k',
            bins=np.logspace(np.log10(1.1), np.log10(5000), 10))
    ax.set_xscale('log')
    ax.set_xlabel("Peak Flux ($\\mu$Jy) at $230\,$GHz", fontsize=14)
    ax.set_ylabel("\# Transients", fontsize=14)
    ax.tick_params(axis='both', labelsize=14)
    ax.axvline(x=50e3, c='k', ls='--', lw=2.0)
    ax.text(0.94,
            0.9,
            "AT2018cow",
Example #15
def derive_and_sanitize():
    # Calculate some columns based on imported data, sanitize some fields
    for name in events:
        set_first_max_light(name)
        if 'claimedtype' in events[name]:
            events[name]['claimedtype'][:] = [ct for ct in events[name]['claimedtype'] if (ct['value'] != '?' and ct['value'] != '-')]
        if 'redshift' in events[name] and 'hvel' not in events[name]:
            # Find the "best" redshift to use for this
            bestsig = 0
            for z in events[name]['redshift']:
                sig = get_sig_digits(z['value'])
                if sig > bestsig:
                    bestz = z['value']
                    bestsig = sig
            if bestsig > 0:
                bestz = float(bestz)
                add_quanta(name, 'hvel', pretty_num(clight/1.e5*((bestz + 1.)**2. - 1.)/
                    ((bestz + 1.)**2. + 1.), sig = bestsig), 'D')
        elif 'hvel' in events[name] and 'redshift' not in events[name]:
            # Find the "best" hvel to use for this
            bestsig = 0
            for hv in events[name]['hvel']:
                sig = get_sig_digits(hv['value'])
                if sig > bestsig:
                    besthv = hv['value']
                    bestsig = sig
            if bestsig > 0 and is_number(besthv):
                voc = float(besthv)*1.e5/clight
                add_quanta(name, 'redshift', pretty_num(sqrt((1. + voc)/(1. - voc)) - 1., sig = bestsig), 'D')
        if 'maxabsmag' not in events[name] and 'maxappmag' in events[name] and 'lumdist' in events[name]:
            # Find the "best" distance to use for this
            bestsig = 0
            for ld in events[name]['lumdist']:
                sig = get_sig_digits(ld['value'])
                if sig > bestsig:
                    bestld = ld['value']
                    bestsig = sig
            if bestsig > 0 and is_number(bestld) and float(bestld) > 0.:
                add_quanta(name, 'maxabsmag', pretty_num(float(events[name]['maxappmag'][0]['value']) -
                    5.0*(log10(float(bestld)*1.0e6) - 1.0), sig = bestsig), 'D')
        if 'redshift' in events[name]:
            # Find the "best" redshift to use for this
            bestsig = 0
            for z in events[name]['redshift']:
                sig = get_sig_digits(z['value'])
                if sig > bestsig:
                    bestz = z['value']
                    bestsig = sig
            if bestsig > 0 and float(bestz) > 0.:
                if 'lumdist' not in events[name]:
                    dl = cosmo.luminosity_distance(float(bestz))
                    add_quanta(name, 'lumdist', pretty_num(dl.value, sig = bestsig), 'D')
                    if 'maxabsmag' not in events[name] and 'maxappmag' in events[name]:
                        add_quanta(name, 'maxabsmag', pretty_num(float(events[name]['maxappmag'][0]['value']) -
                            5.0*(log10(dl.to('pc').value) - 1.0), sig = bestsig), 'D')
        if 'photometry' in events[name]:
            events[name]['photometry'].sort(key=lambda x: (float(x['time']),
                x['band'] if 'band' in x else '', float(x['magnitude'] if 'magnitude' in x else x['counts'])))
        if 'spectra' in events[name] and list(filter(None, ['time' in x for x in events[name]['spectra']])):
            events[name]['spectra'].sort(key=lambda x: float(x['time']))
        events[name] = OrderedDict(sorted(events[name].items(), key=lambda key: event_attr_priority(key[0])))
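
derive_and_sanitize converts between heliocentric velocity and redshift with the relativistic Doppler relations v = c[(1 + z)^2 - 1]/[(1 + z)^2 + 1] and z = sqrt((1 + v/c)/(1 - v/c)) - 1 (clight above is in cm/s, hence the 1e5 factors for km/s). A minimal round-trip check of those two formulas:

from math import sqrt

c_km_s = 2.99792458e5   # speed of light in km/s

def z_to_v(z):
    return c_km_s * ((1. + z)**2 - 1.) / ((1. + z)**2 + 1.)

def v_to_z(v):
    voc = v / c_km_s
    return sqrt((1. + voc) / (1. - voc)) - 1.

z = 0.02
print(z_to_v(z))           # ~5936 km/s
print(v_to_z(z_to_v(z)))   # recovers 0.02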
Example #16
def simple_flux_from_greybody(lambdavector,
                              Trf=None,
                              b=None,
                              Lrf=None,
                              zin=None,
                              ngal=None):
    '''
	Return flux densities at any wavelength of interest (in the range 1-10000 micron),
	assuming a galaxy (at given redshift) graybody spectral energy distribution (SED),
	with a power law replacing the Wien part of the spectrum to account for the
	variability of dust temperatures within the galaxy. The two different functional
	forms are stitched together by imposing that the two functions and their first
	derivatives coincide. The code contains the nitty-gritty details explicitly.

	Inputs:
	alphain = spectral index of the power law replacing the Wien part of the spectrum, to account for the variability of dust temperatures within a galaxy [default = 2; see Blain 1999 and Blain et al. 2003]
	betain = spectral index of the emissivity law for the graybody [default = 2; see Hildebrand 1985]
	Trf = rest-frame temperature [in K; default = 20K]
	Lrf = rest-frame FIR bolometric luminosity [in L_sun; default = 10^10]
	zin = galaxy redshift [default = 0.001]
	lambdavector = array of wavelengths of interest [in microns; default = (24, 70, 160, 250, 350, 500)];

	AUTHOR:
	Lorenzo Moncelsi [[email protected]]

	HISTORY:
	20June2012: created in IDL
	November2015: converted to Python
	'''

    nwv = len(lambdavector)
    nuvector = c * 1.e6 / lambdavector  # Hz

    nsed = 1e4
    lambda_mod = loggen(1e3, 8.0, nsed)  # microns
    nu_mod = c * 1.e6 / lambda_mod  # Hz

    #Lorenzo's version had: H0=70.5, Omega_M=0.274, Omega_L=0.726 (Hinshaw et al. 2009)
    #cosmo = Planck15#(H0 = 70.5 * u.km / u.s / u.Mpc, Om0 = 0.273)
    conversion = 4.0 * np.pi * (
        1.0E-13 * cosmo.luminosity_distance(zin) * 3.08568025E22
    )**2.0 / L_sun  # 4 * pi * D_L^2    units are L_sun/(Jy x Hz)

    Lir = Lrf / conversion  # Jy x Hz

    Ain = np.zeros(ngal) + 1.0e-36  #good starting parameter
    betain = np.zeros(ngal) + b
    alphain = np.zeros(ngal) + 2.0

    fit_params = Parameters()
    fit_params.add('Ain', value=Ain)
    #fit_params.add('Tin', value= Trf/(1.+zin), vary = False)
    #fit_params.add('betain', value= b, vary = False)
    #fit_params.add('alphain', value= alphain, vary = False)

    #pdb.set_trace()
    #THE LM FIT IS HERE
    #Pfin = minimize(sedint, fit_params, args=(nu_mod,Lir.value,ngal))
    Pfin = minimize(sedint,
                    fit_params,
                    args=(nu_mod, Lir.value, ngal, Trf / (1. + zin), b,
                          alphain))

    #pdb.set_trace()
    flux_mJy = sed(Pfin.params, nuvector, ngal, Trf / (1. + zin), b, alphain)

    return flux_mJy
Example #17
def radio():
    # radio
    dt = np.array([8, 8, 10, 10, 10, 10, 12])
    nu = np.array([4.8, 7.4, 6.1, 14, 20, 95, 6.1])
    f = np.array([79, 82, 33, 42, 90, 3600, 33])  # uJy
    ef = np.array([16, 15, -99, -99, -99, -99, -99])
    # -99 ef means upper limit

    f_cgs = f * 1E-6 * 1E-23 * 4 * np.pi * DIST**2

    # detections
    choose = ef > 0
    plt.errorbar(dt[choose], f_cgs[choose], yerr=ef[choose], fmt='.', c='k')

    # non-detections
    choose = ef < 0
    plt.scatter(dt[choose], f_cgs[choose], marker='v', c='k')

    # text saying the frequency
    for ii, val in enumerate(nu):
        if ii % 2 == 0:
            plt.text(dt[ii] * 1.01,
                     f_cgs[ii],
                     "%s GHz" % val,
                     fontsize=14,
                     horizontalalignment='left',
                     verticalalignment='top')
        else:
            plt.text(dt[ii] * 1.01,
                     f_cgs[ii],
                     "%s GHz" % val,
                     fontsize=14,
                     horizontalalignment='left',
                     verticalalignment='bottom')

    # and now limits for this new source...
    # dist was Sept 9 I think
    # AMI: Sept 13
    d = Planck15.luminosity_distance(z=0.033).cgs.value
    plt.scatter(4,
                35 * 1E-6 * 1E-23 * 4 * np.pi * d**2,
                s=50,
                c='lightblue',
                marker='v')
    plt.text(4 * 1.01,
             35 * 1E-6 * 1E-23 * 4 * np.pi * d**2,
             "15.5 GHz",
             horizontalalignment='left',
             fontsize=14)

    # SMA:
    plt.scatter(4,
                3.5 * 1E-3 * 1E-23 * 4 * np.pi * d**2,
                s=50,
                c='lightblue',
                marker='v')
    plt.text(4 * 1.01,
             3.5 * 1E-3 * 1E-23 * 4 * np.pi * d**2,
             "231.5 GHz",
             horizontalalignment='left',
             fontsize=14)

    dt = 6
    L = 0.59 * 1E-3 * 1E-23 * 4 * np.pi * d**2
    plt.scatter(dt, L, s=50, c='lightblue', marker='v')
    plt.text(dt * 1.01, L, "230 GHz", horizontalalignment='left', fontsize=14)

    # VLA:
    plt.scatter(5,
                91 * 1E-6 * 1E-23 * 4 * np.pi * d**2,
                s=50,
                c='lightblue',
                marker='v')
    plt.text(5 * 1.01,
             27 * 1E-6 * 1E-23 * 4 * np.pi * d**2,
             "10 GHz",
             horizontalalignment='left',
             fontsize=14)

    plt.yscale('log')
    plt.xlim(3, 14)
    plt.tick_params(labelsize=14)
    plt.xlabel("dt [day]", fontsize=14)
    plt.ylabel(r"$L_\nu $[erg/s/Hz]", fontsize=14)
    plt.show()
Example #18
def lumlim(z, em, filter):

    print(
        "This function will calculate the luminosity that corresponds to a 5 sigma detection, in erg/s:"
    )
    #print("INFO: 26.2 is AB magnitude for 5 sigma detection limit in the z band") #this was used when I only had the z band
    #first I calculate the flux, then convert to flux density, then find the luminosity limit for the conditions printed above

    if filter == "uband":
        #ABmag is the coadded depth for a 5 sigma magnitude limit in this filter
        ABmag = 26.1
        lambdalow = 305.30  #in nm
        lambdahigh = 408.60  #in nm

    if filter == "gband":
        #ABmag is the coadded depth for a 5 sigma magnitude limit in this filter
        ABmag = 27.4
        lambdalow = 386.30  #in nm
        lambdahigh = 567.00  #in nm

    if filter == "rband":
        #ABmag is the coadded depth for a 5 sigma magnitude limit in this filter
        ABmag = 27.5
        lambdalow = 536.90  #in nm
        lambdahigh = 706.00  #in nm

    if filter == "iband":
        #ABmag is the coadded depth for a 5 sigma magnitude limit in this filter
        ABmag = 26.8
        lambdalow = 675.90  #in nm
        lambdahigh = 833.00  #in nm

    if filter == "zband":
        #ABmag is the coadded depth for a 5 sigma magnitude limit in this filter
        ABmag = 26.1
        lambdalow = 802.90  #in nm
        lambdahigh = 938.60  #in nm

    if filter == "yband":
        #ABmag is the coadded depth for a 5 sigma magnitude limit in this filter
        ABmag = 24.9
        lambdalow = 908.30  #in nm
        lambdahigh = 1099.60  #in nm

    #the following calculates values I use in and plug into the lumlim function

    #finds the flux density
    print("ABmagnitude = -2.5*log10(fluxdensity/(3631 Jansky))")
    print("consequently:")
    #print("fluxdensity = (10**(ABmagnitude/(-2.5)))*(3631 Janksy)")
    #fluxdens = (10**(ABmag/(-2.5)))*3631 #outputs in Jansky
    #uses ABmag from earlier in this function
    fluxdens = 10**((ABmag + 48.6) / (-2.5))  #outputs in erg/(s*Hz*(cm^2))
    print("flux density =", fluxdens, "erg/(s*Hz*(cm^2))")

    #finds the flux using the difference between the frequencies at each end of the band
    c = 2.9979 * (10**17)  #in nm/s
    deltanu = c * (
        (1 / lambdalow) - (1 / lambdahigh))  #the nm should cancel out
    print("deltanu =", deltanu, "s^-1")
    flux = fluxdens * deltanu  #*(10**(-23)) #the extra factor converts from Janskys to ergs/(s*Hz*(cm^2))
    print("flux =", flux, "erg/(s*(cm^2))")

    #finds the luminosity distance
    lumdist = cosmo.luminosity_distance(z)
    #this outputs a special object that keeps track of units, so first I convert it to cm (cgs units), and then I convert it to a regular number
    lumdist_cgs = lumdist.to('cm')
    lumdist_unitless = lumdist_cgs.value
    print("the luminosity distance for redshift z =", z, "is lumdist =",
          lumdist_unitless, "cm")

    #finds the luminosity limit
    print("Luminosity = 4*pi*(luminositydistance**2)*flux")
    lumlimit = 4 * numpy.pi * (lumdist_unitless**2) * flux
    print("luminosity limit for 5 sigma detection of", em,
          "in " + filter + " band is", lumlimit, "ergs/s")

    #using return makes the main output of this function the value of lumlimit so that I can use it to calculate other things when I call this function
    return lumlimit
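
lumlim chains three standard relations: f_nu = 10^(-(AB + 48.6)/2.5) in erg/s/cm^2/Hz, a band flux f = f_nu * delta_nu, and L = 4 pi d_L^2 f. A condensed restatement of the same arithmetic for the r-band numbers above (AB = 27.5, 536.9-706.0 nm) at an illustrative z = 1; Planck15 is assumed here for cosmo, and this is not a different method:

import numpy as np
from astropy.cosmology import Planck15 as cosmo

ABmag, lam_lo, lam_hi = 27.5, 536.90, 706.00   # r band, nm (values from the function above)
c_nm = 2.9979e17                               # speed of light in nm/s

fluxdens = 10 ** (-(ABmag + 48.6) / 2.5)       # erg/s/cm^2/Hz
deltanu = c_nm * (1. / lam_lo - 1. / lam_hi)   # Hz
flux = fluxdens * deltanu                      # erg/s/cm^2

z = 1.0
dl_cm = cosmo.luminosity_distance(z).to('cm').value
lumlimit = 4 * np.pi * dl_cm**2 * flux         # erg/s
print(lumlimit)                                # ~2.7e41 erg/s at z = 1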
#z1s, z2s, cthetas = z1s[fsel], z2s[fsel], cthetas[fsel]
#snnames1, snnames2, dangles = snnames1[fsel], snnames2[fsel], dangles[fsel]
#arr = arr.transpose()[fsel].transpose()

#Cijint = CovMatCalculator(z1s, z2s, cthetas)

#Cijint = np.genfromtxt('Cijintsavedlocaljustsome.txt', delimiter='|')

Cijint = np.genfromtxt('CijIntegrals/Cijintsavedall.txt', delimiter='|')
Cijint = Cijint / 2. / (np.pi**2. * 0.68**2.)

Sij = 4.7152924252903468 * ((1. + z1s)**2.) * (
    (1. + z2s)**
    2.) * Cijint * dDzbydtauofz(z2s) * dDzbydtauofz(z1s) * MPctoKM**2. / (
        cosmo.H(z1s) * cosmo.H(z2s) * cosmo.luminosity_distance(z1s) *
        cosmo.luminosity_distance(z2s)).value

#Hui and Greene definition follows
Cij = 4.7152924252903468 * (
    1. - (1 + z1s)**2. * SpeedOfLight /
    (cosmo.H(z1s) * cosmo.luminosity_distance(z1s)).value) * (
        1. - (1 + z2s)**2. * SpeedOfLight /
        (cosmo.H(z2s) * cosmo.luminosity_distance(z2s)).value
    ) * Cijint * MPctoKM**2. * dDzbydtauofz(z2s) * dDzbydtauofz(
        z1s) / SpeedOfLight**2.

Copmeans, MWmeans, JLACov, CopEmeans, MWEmeans, CopSDs, MWSDs = arr[0], arr[
    1], arr[2], arr[3], arr[4], arr[5], arr[6]

Copmeans = Copmeans * LinGrowthFactor(z1s) * LinGrowthFactor(
Example #20
 def HI_flux(self):
     """Calculates HI flux in JyHz, ref: Meyer (2017)"""
     return 10**self.log10_mhi / (
         49.7 * cosmo.luminosity_distance(self.z_src).value**2)
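
HI_flux inverts the relation quoted in its docstring, M_HI [M_sun] = 49.7 * D_L^2 [Mpc^2] * S [Jy Hz] (Meyer 2017), to predict an integrated line flux from a known log10 HI mass. A standalone sketch of the same relation in both directions, assuming plain float inputs and the Planck15 cosmology used elsewhere in these examples:

from astropy.cosmology import Planck15 as cosmo

def hi_flux_from_mass(log10_mhi, z_src):
    """Predicted HI line flux in Jy Hz for a given log10(M_HI / M_sun)."""
    d_mpc = cosmo.luminosity_distance(z_src).value
    return 10**log10_mhi / (49.7 * d_mpc**2)

def hi_mass_from_flux(flux_jyhz, z_src):
    """Inverse: HI mass in M_sun from an integrated line flux in Jy Hz."""
    d_mpc = cosmo.luminosity_distance(z_src).value
    return 49.7 * d_mpc**2 * flux_jyhz

print(hi_flux_from_mass(9.5, 0.01))   # ~3e4 Jy Hz for M_HI = 10^9.5 M_sun at z = 0.01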
Example #21
def query_6df(ra, dec, rad=1, dist_min=0, dist_max=np.inf,
                sep_max_kpc=np.inf, catalog='VII/259/spectra'):
    """
    Fetch galaxies from the 6df catalog

    ---
    Parameters

    ra float
        Right ascension (deg)
    dec float
        Declination (deg)
    rad float
        Search radius (deg)
    dist_min float
        Min distance of the galaxies (Mpc)
    dist_max float
        Max distance of the galaxies (Mpc)
    sep_max_kpc float
        Min projected distance of the galaxies (kpc)
    catalog str
        Exact version of the catalog

    ---
    Return

    gal_select list
        Selected galaxies, all info
    sep_select list
        Separations for the selected galaxies (arcsec)
    dist_kpc_select list
        Separations for the selected galaxies (kpc)

    """

    # Create a SkyCoord object
    coord = SkyCoord(ra=ra, dec=dec,unit=(u.deg, u.deg))
    # Query GLADE via Vizier
    gal = Vizier.query_region(coord, radius=rad*u.deg,
                                   catalog=catalog)

    # Empty result
    if len(gal) == 0:
        return None, None, None

    # Check if any of the galaxies found are within an
    # acceptable distance
    gal_select = []
    dist_kpc_select = []
    sep_select = []

    # Get distances from redshifts
    print("Note: distances are luminosity distances (Mpc) \
using Planck+15 cosmology")

    for g in gal[0]:
        # Ignore galaxies too nearby
        dist_g = cosmo.luminosity_distance(g['z']).value
        if dist_g < dist_min or dist_g > dist_max:
            continue
        ra_deg = Angle(g['RAJ2000'] + " hours").deg
        dec_deg = Angle(g['DEJ2000'] + " degrees").deg
        sep = SkyCoord(ra=ra_deg*u.deg,
                       dec=dec_deg*u.deg).separation(coord)
        # Projected distance (kpc)
        dist_kpc = dist_g*(10**3)*np.sin(sep)/np.cos(sep)
        if dist_kpc >= 0 and dist_kpc < sep_max_kpc:
            gal_select.append(g)
            dist_kpc_select.append(dist_kpc)
            sep_select.append(sep)
    # No selected galaxies
    if len(gal_select) == 0:
        return None, None, None
    else:
        return gal_select, sep_select, dist_kpc_select
Example #22
    def __init__(self, infile):

        self.Calzetti2000 = np.vectorize(self.Calzetti2000_novec)

        data = read_csv(infile)

        usecols = [
            'z', 'z_err', 'oii_flux', 'oii_flux_err', 'oiii_flux',
            'oiii_flux_err', 'h_alpha_flux', 'h_alpha_flux_err', 'h_beta_flux',
            'h_beta_flux_err', 'lgm_tot_p50', 'lgm_tot_p16', 'lgm_tot_p84',
            'sfr_tot_p50', 'sfr_tot_p16', 'sfr_tot_p84', 'oh_p50',
            'h_alpha_eqw', 'oiii_4959_eqw', 'oiii_5007_eqw', 'oii_3726_eqw',
            'oii_3729_eqw', 'h_beta_eqw', 'h_alpha_eqw_err',
            'oiii_4959_eqw_err', 'oiii_5007_eqw_err', 'oii_3726_eqw_err',
            'oii_3729_eqw_err', 'h_beta_eqw_err'
        ]
        newnames = [
            'z', 'z_err', 'oii_uncorr', 'oii_err_uncorr', 'oiii_uncorr',
            'oiii_err_uncorr', 'ha_uncorr', 'ha_err_uncorr', 'hb_uncorr',
            'hb_err_uncorr', 'logmstar', 'logmstar_lo', 'logmstar_hi', 'sfr',
            'sfr_lo', 'sfr_hi', 'o_abundance', 'ha_ew_uncorr',
            'oiii4959_ew_uncorr', 'oiii5007_ew_uncorr', 'oii3726_ew_uncorr',
            'oii3729_ew_uncorr', 'hb_ew_uncorr', 'ha_ew_err',
            'oiii4959_ew_err_uncorr', 'oiii5007_ew_err_uncorr',
            'oii3726_ew_err_uncorr', 'oii3729_ew_err_uncorr',
            'hb_ew_err_uncorr'
        ]

        for col, name in zip(usecols, newnames):
            setattr(self, name, data[col].values)

        for x, colname in enumerate(newnames):
            if 'flux' in usecols[x]:
                setattr(self, colname,
                        getattr(self, colname) /
                        10**17)  # Units are 10**-17 erg/s/cm^2

        # Dust correction
        # E(B-V) = log_{10}(ha_uncorr/(hb_uncorr*2.86)) *(-0.44/0.4) / (k(lam_ha) - k(lam_hb))

        self.EBV = np.log10(self.ha_uncorr / (self.hb_uncorr * 2.86)) * (
            -.44 / 0.4) / (self.Calzetti2000(6563.) - self.Calzetti2000(4863.))

        # A_oiii = self.Calzetti2000(4980.) * self.EBV / 0.44
        # A_oii = self.Calzetti2000(3727.) * self.EBV / 0.44
        # A_ha = self.Calzetti2000(6563.) * self.EBV / 0.44
        # A_hb = self.Calzetti2000(4863.) * self.EBV / 0.44

        for x, colname in enumerate(newnames):

            if 'ha_' in colname:
                wave = 6563.
            elif 'hb_' in colname:
                wave = 4863.
            elif 'oii_' in colname:
                wave = 3727.
            elif 'oiii_' in colname:
                wave = 4980.
            elif 'oii3726_' in colname:
                wave = 3726.
            elif 'oii3729_' in colname:
                wave = 3729.
            elif 'oiii4959_' in colname:
                wave = 4959.
            elif 'oiii5007_' in colname:
                wave = 5007.

            if 'uncorr' in colname and 'ew' not in colname:

                A_line = self.Calzetti2000(wave) * self.EBV / 0.44

                newflux = getattr(self, colname) * np.power(10, 0.4 * A_line)
                setattr(self, colname[:-7], newflux)

            elif 'uncorr' in colname and 'ew' in colname:

                multiplier = np.power(
                    10, 0.4 * self.Calzetti2000(wave) * self.EBV *
                    ((1. / .44) - 1.))
                setattr(self, colname[:-7],
                        getattr(self, colname) * multiplier)

        self.ha_lum = self.ha * 4 * np.pi * (cosmo.luminosity_distance(
            self.z).to('cm').value)**2

        goodind = np.where(np.log10(self.ha_lum) < 45)[0]

        for x, colname in enumerate(list(self.__dict__.keys())):
            if colname != 'Calzetti2000':
                setattr(self, colname, getattr(self, colname)[goodind])

        self.oiii_ew = self.oiii4959_ew + self.oiii5007_ew
        self.oii_ew = self.oii3726_ew + self.oii3729_ew
        self.oiii_ew_err = np.sqrt(self.oiii4959_ew_err**2. +
                                   self.oiii5007_ew_err**2.)
        self.oii_ew_err = np.sqrt(self.oii3726_ew_err**2. +
                                  self.oii3729_ew_err**2.)
Example #23
from astropy.io import ascii


def round_sig(x, sig=2):
    print(x)
    if x < 0:
        return -round(-x, sig - int(floor(log10(-x))) - 1)
    return round(x, sig - int(floor(log10(x))) - 1)


def ndec(num):
    dec = str(num).split('.')[-1]
    return len(dec)


d = Planck15.luminosity_distance(z=0.2714).cgs.value

headings = np.array(['Date (MJD)', '$\Delta t$', 'Filter', 'AB Mag'])
label = "opt-phot"
caption = "Optical photometry for ZTF18abvkwla \
from forced photometry on P48 images \citep{Yao2019}. \
Values have not been corrected for Galactic extinction."

# Print the table headers
ncol = len(headings)
colstr = ""
colstr += 'l'
for col in np.arange(ncol - 1):
    colstr += "r"
print(colstr)
Example #24
def run(spec_file,
        R,
        zgal,
        results_file=None,
        spec_fit='tmp.fits',
        chk=True,
        flux_scale=1.,
        atmos=[],
        gaps=[],
        wvmnx=(0., 1e9)):
    """
    Wrapper for running and handling outputs

    Args:
        spec_file (str or XSpectrum1D):
        R (float):
        zgal (float):
        results_file (str, optional):
        spec_fit:
        chk:
        flux_scale:
        atmos (list of tuple):
            List of (wvmin,wvmax) regions to mask during the analysis
        gaps:
        wvmnx:
    """
    # Init

    # Load spectrum
    if isinstance(spec_file, XSpectrum1D):
        spec = spec_file
    else:
        spec = readspec(spec_file)

    if chk:
        spec.plot()

    # Rebin
    wave = spec.wavelength.value

    diff = wave[1:] - wave[0:-1]
    meddiff = np.median(diff)
    print(meddiff)
    newwave = np.arange(wave[0], wave[-2], meddiff) * units.angstrom
    newspec = spec.rebin(newwave, do_sig=True, grow_bad_sig=True)

    # Scale to MUSE flux units
    newspec = XSpectrum1D.from_tuple(
        (newspec.wavelength.value, newspec.flux * flux_scale,
         newspec.sig * flux_scale))

    # Mask
    wave = newspec.wavelength.value
    goodpixels = np.where((wave >= wvmnx[0]) & (wave <= wvmnx[1]))[0]

    mask_lam = atmos
    mask_lam.extend(gaps)
    goodidxs = np.array([]).astype(int)
    for i, mrange in enumerate(mask_lam):
        theseidxs = np.where((wave < mrange[0]) | (wave > mrange[1]))[0]
        goodpixels = np.intersect1d(theseidxs, goodpixels)
    goodpixels = np.unique(goodpixels)
    goodpixels.sort()

    if chk:
        plt.clf()
        plt.plot(newspec.wavelength[goodpixels], newspec.flux[goodpixels])
        plt.show()

    # Run it
    ppfit, miles, star_weights = fit_spectrum(newspec,
                                              zgal,
                                              R,
                                              degree_mult=0,
                                              degree_add=3,
                                              goodpixels=goodpixels,
                                              reddening=1.,
                                              rebin=False)

    # Age
    age, metals = miles.mean_age_metal(star_weights)
    print('Age = {} Gyr'.format(age))
    print('Metals = {}'.format(metals))

    # Mass -- This is a bit approximate as Dwv is a guess for now
    actualflux = ppfit.bestfit * constants.L_sun.cgs / units.angstrom / (
        4 * np.pi * (cosmo.luminosity_distance(zgal).to(units.cm))**2 /
        (1 + zgal))
    # When fitting, the routine thought our data and model spectra had same units...
    Dwv = 1700.  # Ang, width of the band pass
    scfactor = np.median(ppfit.bestfit *
                         (units.erg / units.s / units.cm**2 / units.angstrom) /
                         actualflux) * Dwv
    # To get the actual model mass required to fit spectrum, scale by this ratio
    massmodels = scfactor * miles.total_mass(star_weights)
    print('log10 M* = {}'.format(np.log10(massmodels)))

    # Reddening
    print('E(B-V) = {}'.format(ppfit.reddening))

    # Write?
    if results_file is not None:
        dump_ppxf_results(ppfit, miles, zgal, results_file)
    if spec_fit is not None:
        bestfit = dump_bestfit(ppfit, outfile=spec_fit, z=zgal)

    # Final check
    if chk:
        bestfit = dump_bestfit(ppfit, z=zgal)
        plt.clf()
        plt.plot(newspec.wavelength, newspec.flux)
        plt.plot(bestfit.wavelength, bestfit.flux)
        plt.show()
Example #25
def at2018cow(ax):
    """ Peak of 215.5 GHz light curve """
    d = Planck15.luminosity_distance(z=0.014).cgs.value
    t = 20
    nu = 215.5E9
    plot_point(ax, d, nu, t, 53.32, '*', name='AT2018cow')
Example #26
from load_lc import get_uv_lc


def round_sig(x, sig=2):
    print(x)
    if x < 0:
        return -round(-x, sig - int(floor(log10(-x))) - 1)
    return round(x, sig - int(floor(log10(x))) - 1)


def ndec(num):
    dec = str(num).split('.')[-1]
    return len(dec)


d = Planck15.luminosity_distance(z=0.05403).cgs.value

headings = np.array(['Date', '$\Delta t$', 'Instr.', 'Filt.', 'Mag'])
label = "opt-phot"
caption = "Optical light curve of ZTF18aaqjovh from forced photometry on P48 images \citep{Yao2019}. Values have been corrected for Milky Way extinction. Phase is relative to $t_0$ defined in Section \ref{sec:discovery}."

# Print the table headers
ncol = len(headings)
colstr = ""
colstr += 'l'
for col in np.arange(ncol - 1):
    colstr += "r"
print(colstr)

colheadstr = ""
for col in np.arange(ncol - 1):
Zneb.set_index('label', inplace=True)
Zneb['Zneb_solar'] = 10**(Zneb['12+logOH'] - 8.69)
Zneb['Zneb_sig']   = 10**(Zneb['12+logOH'] + Zneb['sigZ'] - 8.69) - 10**(Zneb['12+logOH']  - 8.69)
Zneb.loc[Zneb['sigZ'] == -99, 'Zneb_sig'] = int(-99)
Zneb.loc[Zneb['sigZ'] == -9, 'Zneb_sig'] = int(-9)

# Load the Megasaura spectra
(sp, resoln, dresoln, LL, zz_sys, speclist) = jrr.mage.open_many_spectra(mage_mode, which_list="wcont", zchoice='neb', addS99=True, MWdr=True, silent=True, verbose=False)

wave_targ = 1500.0  # where to measure rest-frame flux density, Angstroms
wave_win = 10.      # window +-1500, to measure rest-frame flux density.  units are rest-frame Angstroms

print("label   EBV_S99  sigma_EBV  Zneb  sigZ  Z_S99   age_S99  fnu1500  fnu1500_dered  SFR_raw  SFR_dered")
print("#(SFRs and fnus not yet corrected for magnification)")
for label in speclist['short_label'] :
    fnu1500 = sp[label]['rest_fnu'].loc[sp[label]['rest_wave'].between(wave_targ - wave_win, wave_targ + wave_win)].median()
    if label in S99.index :  # if there's a EBV from S99 fit
        jrr.spec.deredden_internal_extinction(sp[label], S99.loc[label]['EBV'])
        fnu1500_dered = sp[label]['rest_fnu_dered'].loc[sp[label]['rest_wave'].between(wave_targ - wave_win, wave_targ + wave_win)].median()
        zz = speclist.loc[label]['z_syst']
        flux_to_lum = 4 * pi * (cosmo.luminosity_distance(zz).cgs.value)**2     # Apply luminosity distance
        K98_convert = 1.4E-28  # Eqn 1 of Kennicutt et al. 1998, units are erg/s/Hz
        SFR_raw   =  fnu1500 * flux_to_lum * K98_convert        # ** Magnification should be divided here
        SFR_dered =  fnu1500_dered * flux_to_lum * K98_convert  # ** Magnification should be divided here
        print(label, S99.loc[label]['EBV'],  S99.loc[label]['sigmaEBV'], Zneb.loc[label]['Zneb_solar'],  Zneb.loc[label]['Zneb_sig'], end=' ')
        print(S99.loc[label]['ZS99'],  S99.loc[label]['tS99'], fnu1500, fnu1500_dered, SFR_raw, SFR_dered)


# Make a new data frame w results, and dump it to a file.
galprops =  pandas.read_table("mage_galproperties.txt", delim_whitespace=True, comment="#")
def do_cleanup(catalog):
    """Task to cleanup catalog before final write."""
    task_str = catalog.get_current_task_str()

    # Set preferred names, calculate some columns based on imported data,
    # sanitize some fields
    keys = catalog.entries.copy().keys()

    cleanupcnt = 0
    for oname in pbar(keys, task_str):
        name = catalog.add_entry(oname)

        # Set the preferred name, switching to that name if name changed.
        name = catalog.entries[name].set_preferred_name()

        aliases = catalog.entries[name].get_aliases()
        catalog.entries[name].set_first_max_light()

        if TIDALDISRUPTION.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['MLS', 'SSS', 'CSS', 'GRB ']
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix) and
                            is_number(alias.replace(prefix, '')[:2])):
                        discoverdate = ('/'.join([
                            '20' + alias.replace(prefix, '')[:2],
                            alias.replace(prefix, '')[2:4],
                            alias.replace(prefix, '')[4:6]
                        ]))
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if TIDALDISRUPTION.DISCOVER_DATE in catalog.entries[name]:
                    break
        if TIDALDISRUPTION.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = [
                'ASASSN-', 'PS1-', 'PS1', 'PS', 'iPTF', 'PTF', 'SCP-', 'SNLS-',
                'SPIRITS', 'LSQ', 'DES', 'SNHiTS', 'Gaia', 'GND', 'GNW', 'GSD',
                'GSW', 'EGS', 'COS', 'OGLE', 'HST'
            ]
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix) and
                            is_number(alias.replace(prefix, '')[:2]) and
                            is_number(alias.replace(prefix, '')[:1])):
                        discoverdate = '20' + alias.replace(prefix, '')[:2]
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if TIDALDISRUPTION.DISCOVER_DATE in catalog.entries[name]:
                    break
        if TIDALDISRUPTION.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['SNF']
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix) and
                            is_number(alias.replace(prefix, '')[:4])):
                        discoverdate = ('/'.join([
                            alias.replace(prefix, '')[:4],
                            alias.replace(prefix, '')[4:6],
                            alias.replace(prefix, '')[6:8]
                        ]))
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if TIDALDISRUPTION.DISCOVER_DATE in catalog.entries[name]:
                    break
        if TIDALDISRUPTION.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['PTFS', 'SNSDF']
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix) and
                            is_number(alias.replace(prefix, '')[:2])):
                        discoverdate = ('/'.join([
                            '20' + alias.replace(prefix, '')[:2],
                            alias.replace(prefix, '')[2:4]
                        ]))
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if TIDALDISRUPTION.DISCOVER_DATE in catalog.entries[name]:
                    break
        if TIDALDISRUPTION.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['AT', 'SN', 'OGLE-', 'SM ', 'KSN-']
            for alias in aliases:
                for prefix in prefixes:
                    if alias.startswith(prefix):
                        year = re.findall(r'\d+', alias)
                        if len(year) == 1:
                            year = year[0]
                        else:
                            continue
                        if alias.replace(prefix, '').index(year) != 0:
                            continue
                        if (year and is_number(year) and '.' not in year and
                                len(year) <= 4):
                            discoverdate = year
                            if catalog.args.verbose:
                                tprint('Added discoverdate from name [' +
                                       alias + ']: ' + discoverdate)
                            source = catalog.entries[name].add_self_source()
                            catalog.entries[name].add_quantity(
                                TIDALDISRUPTION.DISCOVER_DATE,
                                discoverdate,
                                source,
                                derived=True)
                            break
                if TIDALDISRUPTION.DISCOVER_DATE in catalog.entries[name]:
                    break

        if (TIDALDISRUPTION.RA not in catalog.entries[name] or
                TIDALDISRUPTION.DEC not in catalog.entries[name]):
            prefixes = [
                'PSN J', 'MASJ', 'CSS', 'SSS', 'MASTER OT J', 'HST J', 'TCP J',
                'MACS J', '2MASS J', 'EQ J', 'CRTS J', 'SMT J'
            ]
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix) and
                            is_number(alias.replace(prefix, '')[:6])):
                        noprefix = alias.split(':')[-1].replace(
                            prefix, '').replace('.', '')
                        decsign = '+' if '+' in noprefix else '-'
                        noprefix = noprefix.replace('+', '|').replace('-', '|')
                        nops = noprefix.split('|')
                        if len(nops) < 2:
                            continue
                        rastr = nops[0]
                        decstr = nops[1]
                        ra = ':'.join([rastr[:2], rastr[2:4], rastr[4:6]]) + \
                            ('.' + rastr[6:] if len(rastr) > 6 else '')
                        dec = (decsign + ':'.join(
                            [decstr[:2], decstr[2:4], decstr[4:6]]) +
                            ('.' + decstr[6:] if len(decstr) > 6 else ''))
                        if catalog.args.verbose:
                            tprint('Added ra/dec from name: ' + ra + ' ' + dec)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.RA, ra, source, derived=True)
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.DEC, dec, source, derived=True)
                        break
                if TIDALDISRUPTION.RA in catalog.entries[name]:
                    break

        no_host = (TIDALDISRUPTION.HOST not in catalog.entries[name] or
                   not any([
                       x[QUANTITY.VALUE] == 'Milky Way'
                       for x in catalog.entries[name][TIDALDISRUPTION.HOST]
                   ]))
        if (TIDALDISRUPTION.RA in catalog.entries[name] and
                TIDALDISRUPTION.DEC in catalog.entries[name] and no_host):
            from astroquery.irsa_dust import IrsaDust
            if name not in catalog.extinctions_dict:
                try:
                    ra_dec = (catalog.entries[name][TIDALDISRUPTION.RA][0][
                        QUANTITY.VALUE] + " " + catalog.entries[name][
                            TIDALDISRUPTION.DEC][0][QUANTITY.VALUE])
                    result = IrsaDust.get_query_table(ra_dec, section='ebv')
                except (KeyboardInterrupt, SystemExit):
                    raise
                except Exception:
                    warnings.warn("Coordinate lookup for " + name +
                                  " failed in IRSA.")
                else:
                    ebv = result['ext SandF mean'][0]
                    ebverr = result['ext SandF std'][0]
                    catalog.extinctions_dict[name] = [ebv, ebverr]
            if name in catalog.extinctions_dict:
                sources = uniq_cdl([
                    catalog.entries[name].add_self_source(),
                    catalog.entries[name]
                    .add_source(bibcode='2011ApJ...737..103S')
                ])
                (catalog.entries[name].add_quantity(
                    TIDALDISRUPTION.EBV,
                    str(catalog.extinctions_dict[name][0]),
                    sources,
                    e_value=str(catalog.extinctions_dict[name][1]),
                    derived=True))
        if ((TIDALDISRUPTION.HOST in catalog.entries[name] and
             (TIDALDISRUPTION.HOST_RA not in catalog.entries[name] or
              TIDALDISRUPTION.HOST_DEC not in catalog.entries[name]))):
            for host in catalog.entries[name][TIDALDISRUPTION.HOST]:
                alias = host[QUANTITY.VALUE]
                if ' J' in alias and is_number(alias.split(' J')[-1][:6]):
                    noprefix = alias.split(' J')[-1].split(':')[-1].replace(
                        '.', '')
                    decsign = '+' if '+' in noprefix else '-'
                    noprefix = noprefix.replace('+', '|').replace('-', '|')
                    nops = noprefix.split('|')
                    if len(nops) < 2:
                        continue
                    rastr = nops[0]
                    decstr = nops[1]
                    hostra = (':'.join([rastr[:2], rastr[2:4], rastr[4:6]]) +
                              ('.' + rastr[6:] if len(rastr) > 6 else ''))
                    hostdec = decsign + ':'.join([
                        decstr[:2], decstr[2:4], decstr[4:6]
                    ]) + ('.' + decstr[6:] if len(decstr) > 6 else '')
                    if catalog.args.verbose:
                        tprint('Added hostra/hostdec from name: ' + hostra +
                               ' ' + hostdec)
                    source = catalog.entries[name].add_self_source()
                    catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.HOST_RA, hostra, source, derived=True)
                    catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.HOST_DEC,
                        hostdec,
                        source,
                        derived=True)
                    break
                if TIDALDISRUPTION.HOST_RA in catalog.entries[name]:
                    break

        if (TIDALDISRUPTION.REDSHIFT not in catalog.entries[name] and
                TIDALDISRUPTION.VELOCITY in catalog.entries[name]):
            # Find the "best" velocity to use for this
            bestsig = 0
            for hv in catalog.entries[name][TIDALDISRUPTION.VELOCITY]:
                sig = get_sig_digits(hv[QUANTITY.VALUE])
                if sig > bestsig:
                    besthv = hv[QUANTITY.VALUE]
                    bestsrc = hv['source']
                    bestsig = sig
            if bestsig > 0 and is_number(besthv):
                voc = float(besthv) * 1.e5 / CLIGHT
                source = catalog.entries[name].add_self_source()
                sources = uniq_cdl([source] + bestsrc.split(','))
                (catalog.entries[name].add_quantity(
                    TIDALDISRUPTION.REDSHIFT,
                    pretty_num(
                        sqrt((1. + voc) / (1. - voc)) - 1., sig=bestsig),
                    sources,
                    kind='heliocentric',
                    derived=True))
        if (TIDALDISRUPTION.REDSHIFT not in catalog.entries[name] and
                len(catalog.nedd_dict) > 0 and
                TIDALDISRUPTION.HOST in catalog.entries[name]):
            reference = "NED-D"
            refurl = "http://ned.ipac.caltech.edu/Library/Distances/"
            for host in catalog.entries[name][TIDALDISRUPTION.HOST]:
                if host[QUANTITY.VALUE] in catalog.nedd_dict:
                    source = catalog.entries[name].add_source(
                        bibcode='2016A&A...594A..13P')
                    secondarysource = catalog.entries[name].add_source(
                        name=reference, url=refurl, secondary=True)
                    meddist = statistics.median(catalog.nedd_dict[host[
                        QUANTITY.VALUE]])
                    redz = z_at_value(cosmo.comoving_distance,
                                      float(meddist) * un.Mpc)
                    redshift = pretty_num(
                        redz, sig=get_sig_digits(str(meddist)))
                    catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.REDSHIFT,
                        redshift,
                        uniq_cdl([source, secondarysource]),
                        kind='host',
                        derived=True)
        if (TIDALDISRUPTION.MAX_ABS_MAG not in catalog.entries[name] and
                TIDALDISRUPTION.MAX_APP_MAG in catalog.entries[name] and
                TIDALDISRUPTION.LUM_DIST in catalog.entries[name]):
            # Find the "best" distance to use for this
            bestsig = 0
            for ld in catalog.entries[name][TIDALDISRUPTION.LUM_DIST]:
                sig = get_sig_digits(ld[QUANTITY.VALUE])
                if sig > bestsig:
                    bestld = ld[QUANTITY.VALUE]
                    bestsrc = ld['source']
                    bestsig = sig
            if bestsig > 0 and is_number(bestld) and float(bestld) > 0.:
                source = catalog.entries[name].add_self_source()
                sources = uniq_cdl([source] + bestsrc.split(','))
                bestldz = z_at_value(cosmo.luminosity_distance,
                                     float(bestld) * un.Mpc)
                pnum = (float(catalog.entries[name][
                    TIDALDISRUPTION.MAX_APP_MAG][0][QUANTITY.VALUE]) - 5.0 *
                    (log10(float(bestld) * 1.0e6) - 1.0
                     ) + 2.5 * log10(1.0 + bestldz))
                pnum = pretty_num(pnum, sig=bestsig)
                catalog.entries[name].add_quantity(
                    TIDALDISRUPTION.MAX_ABS_MAG, pnum, sources, derived=True)
        if TIDALDISRUPTION.REDSHIFT in catalog.entries[name]:
            # Find the "best" redshift to use for this
            bestz, bestkind, bestsig, bestsrc = catalog.entries[
                name].get_best_redshift()
            if bestsig > 0:
                try:
                    bestz = float(bestz)
                except Exception:
                    print(catalog.entries[name])
                    raise
                if TIDALDISRUPTION.VELOCITY not in catalog.entries[name]:
                    source = catalog.entries[name].add_self_source()
                    # Relativistic Doppler relation: v = c * ((1 + z)**2 - 1) /
                    # ((1 + z)**2 + 1), with CLIGHT / KM giving c in km/s.
                    pnum = CLIGHT / KM * \
                        ((bestz + 1.)**2. - 1.) / ((bestz + 1.)**2. + 1.)
                    pnum = pretty_num(pnum, sig=bestsig)
                    catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.VELOCITY,
                        pnum,
                        source,
                        kind=PREF_KINDS[bestkind],
                        derived=True)
                if bestz > 0.:
                    from astropy.cosmology import Planck15 as cosmo
                    if TIDALDISRUPTION.LUM_DIST not in catalog.entries[name]:
                        dl = cosmo.luminosity_distance(bestz)
                        sources = [
                            catalog.entries[name].add_self_source(),
                            catalog.entries[name]
                            .add_source(bibcode='2016A&A...594A..13P')
                        ]
                        sources = uniq_cdl(sources + bestsrc.split(','))
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.LUM_DIST,
                            pretty_num(
                                dl.value, sig=bestsig),
                            sources,
                            kind=PREF_KINDS[bestkind],
                            derived=True)
                        if (TIDALDISRUPTION.MAX_ABS_MAG not in
                                catalog.entries[name] and
                                TIDALDISRUPTION.MAX_APP_MAG in
                                catalog.entries[name]):
                            source = catalog.entries[name].add_self_source()
                            pnum = pretty_num(
                                float(catalog.entries[name][
                                    TIDALDISRUPTION.MAX_APP_MAG][0][
                                        QUANTITY.VALUE]) - 5.0 *
                                (log10(dl.to('pc').value) - 1.0
                                 ) + 2.5 * log10(1.0 + bestz),
                                sig=bestsig + 1)
                            catalog.entries[name].add_quantity(
                                TIDALDISRUPTION.MAX_ABS_MAG,
                                pnum,
                                sources,
                                derived=True)
                    if TIDALDISRUPTION.COMOVING_DIST not in catalog.entries[
                            name]:
                        cd = cosmo.comoving_distance(bestz)
                        sources = [
                            catalog.entries[name].add_self_source(),
                            catalog.entries[name]
                            .add_source(bibcode='2016A&A...594A..13P')
                        ]
                        sources = uniq_cdl(sources + bestsrc.split(','))
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.COMOVING_DIST,
                            pretty_num(
                                cd.value, sig=bestsig),
                            sources,
                            derived=True)
        if all([
                x in catalog.entries[name]
                for x in [
                    TIDALDISRUPTION.RA, TIDALDISRUPTION.DEC,
                    TIDALDISRUPTION.HOST_RA, TIDALDISRUPTION.HOST_DEC
                ]
        ]):
            # For now just using first coordinates that appear in entry
            try:
                c1 = coord(
                    ra=catalog.entries[name][TIDALDISRUPTION.RA][0][
                        QUANTITY.VALUE],
                    dec=catalog.entries[name][TIDALDISRUPTION.DEC][0][
                        QUANTITY.VALUE],
                    unit=(un.hourangle, un.deg))
                c2 = coord(
                    ra=catalog.entries[name][TIDALDISRUPTION.HOST_RA][0][
                        QUANTITY.VALUE],
                    dec=catalog.entries[name][TIDALDISRUPTION.HOST_DEC][0][
                        QUANTITY.VALUE],
                    unit=(un.hourangle, un.deg))
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception:
                pass
            else:
                sources = uniq_cdl(
                    [catalog.entries[name].add_self_source()] + catalog.
                    entries[name][TIDALDISRUPTION.RA][0]['source'].split(',') +
                    catalog.entries[name][TIDALDISRUPTION.DEC][0]['source'].
                    split(',') + catalog.entries[name][TIDALDISRUPTION.HOST_RA]
                    [0]['source'].split(',') + catalog.entries[name][
                        TIDALDISRUPTION.HOST_DEC][0]['source'].split(','))
                if (TIDALDISRUPTION.HOST_OFFSET_ANG not in
                        catalog.entries[name]):
                    hosa = Decimal(c1.separation(c2).arcsecond)
                    hosa = pretty_num(hosa)
                    catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.HOST_OFFSET_ANG,
                        hosa,
                        sources,
                        derived=True,
                        u_value='arcseconds')
                if (TIDALDISRUPTION.COMOVING_DIST in catalog.entries[name] and
                        TIDALDISRUPTION.REDSHIFT in catalog.entries[name] and
                        TIDALDISRUPTION.HOST_OFFSET_DIST not in
                        catalog.entries[name]):
                    offsetsig = get_sig_digits(catalog.entries[name][
                        TIDALDISRUPTION.HOST_OFFSET_ANG][0][QUANTITY.VALUE])
                    sources = uniq_cdl(
                        sources.split(',') + (catalog.entries[name][
                            TIDALDISRUPTION.COMOVING_DIST][0]['source']).
                        split(',') + (catalog.entries[name][
                            TIDALDISRUPTION.REDSHIFT][0]['source']).split(','))
                    (catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.HOST_OFFSET_DIST,
                        pretty_num(
                            float(catalog.entries[name][
                                TIDALDISRUPTION.HOST_OFFSET_ANG][0][
                                QUANTITY.VALUE]) / 3600. * (pi / 180.) *
                            float(catalog.entries[name][
                                TIDALDISRUPTION.COMOVING_DIST][0][
                                    QUANTITY.VALUE]) * 1000. /
                            (1.0 + float(catalog.entries[name][
                                TIDALDISRUPTION.REDSHIFT][0][QUANTITY.VALUE])),
                            sig=offsetsig),
                        sources))

        catalog.entries[name].sanitize()
        catalog.journal_entries(bury=True, final=True, gz=True)
        cleanupcnt = cleanupcnt + 1
        if catalog.args.travis and cleanupcnt % 1000 == 0:
            break

    catalog.save_caches()

    return
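
A minimal standalone sketch of two conversions used repeatedly in the cleanup
above: the relativistic relation between heliocentric velocity and redshift,
and the absolute magnitude derived from an apparent magnitude and a Planck15
luminosity distance (including the 2.5*log10(1+z) term, as in the code). The
numbers are illustrative only, not taken from the catalog.

from math import log10, sqrt

from astropy.cosmology import Planck15 as cosmo

C_KMS = 299792.458  # speed of light in km/s


def redshift_from_velocity(v_kms):
    """z = sqrt((1 + v/c) / (1 - v/c)) - 1 (relativistic Doppler)."""
    beta = v_kms / C_KMS
    return sqrt((1. + beta) / (1. - beta)) - 1.


def velocity_from_redshift(z):
    """v = c * ((1 + z)**2 - 1) / ((1 + z)**2 + 1), inverse of the above."""
    return C_KMS * ((1. + z)**2 - 1.) / ((1. + z)**2 + 1.)


def abs_mag(app_mag, z):
    """Absolute magnitude from apparent magnitude and Planck15 d_L."""
    dl_pc = cosmo.luminosity_distance(z).to('pc').value
    return app_mag - 5.0 * (log10(dl_pc) - 1.0) + 2.5 * log10(1.0 + z)


z = redshift_from_velocity(3000.)   # about 0.01
v = velocity_from_redshift(z)       # recovers about 3000 km/s
M = abs_mag(17.5, z)                # absolute magnitude for these inputs
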
Example #29
def main():
    """
    # Script to get lineflux
    """
    # Get extraction
    data = np.genfromtxt("../data/spectroscopy/UVBext_lya.dat", dtype=None)

    wl = data[:, 1]
    wl_mask = (wl > 3850) & (wl < 3960)

    flux = data[:, 2]
    error = data[:, 3]

    wl, flux, error = wl[wl_mask], flux[wl_mask], error[wl_mask]

    error[error > 1e-15] = np.median(error)

    mask = (wl > convert_air_to_vacuum(3907) - 12) & (
        wl < convert_air_to_vacuum(3907) + 12)

    continuum_fit = np.polynomial.chebyshev.chebfit(wl[~mask],
                                                    flux[~mask],
                                                    deg=2)
    continuum = np.polynomial.chebyshev.chebval(wl, continuum_fit)

    pl.plot(wl, continuum - continuum)  # zero baseline after subtraction

    flux = flux - continuum

    F_lya = np.trapz(flux[mask], x=wl[mask])
    print("Total %0.1e" % F_lya)

    F_lya_err = np.sqrt(np.trapz((error**2.)[mask], x=wl[mask]))

    dL = cosmo.luminosity_distance(2.211).to(u.cm).value
    L_lya = F_lya * 4 * np.pi * dL**2
    L_lya_err = F_lya_err * 4 * np.pi * dL**2
    print(L_lya)
    SFR = 9.1e-43 * L_lya / 1.64
    SFR_err = 9.1e-43 * L_lya_err / 1.64
    print(SFR, SFR_err)

    # flux[flux_sky > 10000] = np.nan
    pl.plot(wl, flux, label="Spectrum")
    pl.plot(wl[mask], flux[mask], label="Integration limits")

    pl.errorbar(wl,
                flux,
                yerr=error,
                fmt=".k",
                capsize=0,
                elinewidth=0.5,
                ms=3,
                label=r"f$_{[L\alpha]}$ = %0.1e +- %0.1e" % (F_lya, F_lya_err))
    pl.errorbar(1,
                1,
                yerr=1,
                fmt=".k",
                capsize=0,
                elinewidth=0.5,
                ms=3,
                label="SFR = " + str(np.around(SFR, 0)) + " +-" +
                str(np.around(SFR_err, 0)))
    pl.xlim(3850, 3960)
    pl.ylim(-0.5e-17, 1e-17)

    # Save figure for tex
    pl.legend()
    pl.savefig("../figures/lya_flux.pdf", dpi="figure")
    pl.show()
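
A minimal sketch, with explicit astropy units, of the flux -> luminosity ->
SFR chain used in main() above; the numerical factors (9.1e-43 and 1.64)
mirror the script, the redshift is the same z = 2.211, and the input flux is
an assumed illustrative value.

import numpy as np
from astropy import units as u
from astropy.cosmology import Planck15 as cosmo

z = 2.211
F_line = 1e-17 * u.erg / u.s / u.cm**2   # integrated line flux (assumed)

d_L = cosmo.luminosity_distance(z).to(u.cm)
L_line = (4 * np.pi * d_L**2 * F_line).to(u.erg / u.s)

# Same conversion factors as in main() above.
SFR = 9.1e-43 * L_line.value / 1.64      # in M_sun / yr
print(L_line, SFR)
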
Example #30
        ttype_err[i] = 0
    elif rc3_match['T'][0] == '*':
        ttype_err[i] = 0
    else:
        ttype_err[i] = rc3_match['T'][0]

    ned_idx = np.where(ned_tot['col2'] == name)

    if name == 'ngc6789':
        lum_dist = 3.6e5
        lum_dist_low = 3.55e5
        lum_dist_up = 3.65e5
    else:
        z = ned_tot['col8'][ned_idx[0]]
        z_unc = ned_tot['col9'][ned_idx[0]]
        lum_dist = Cosmo.luminosity_distance(z).value * 1e5  # in 10pc
        lum_dist_up = Cosmo.luminosity_distance(z + z_unc).value * 1e5  # in 10pc
        lum_dist_low = Cosmo.luminosity_distance(z - z_unc).value * 1e5  # in 10pc

    if np.isnan(bulge) and ~np.isnan(disk):
        btot[i] = 0
        mtot_apparent = disk
        mtot_bulge_err[i] = 99
    else:
        if disk > bulge:
            btot[i] = (10 ** ((disk - bulge) / 2.5) / (10 ** ((disk - bulge) / 2.5) + 1.0))
        if disk == bulge:
            btot[i] = 0.5
        if disk < bulge:
            btot[i] = (1.0 / (10 ** ((bulge - disk) / 2.5) + 1.0))
        mtot_apparent = 21.581 - 2.5 * np.log10(10 ** ((21.581 - disk) / 2.5) + 10 ** ((21.581 - bulge) / 2.5))
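
All three magnitude orderings above reduce to the same flux-ratio expression,
B/T = F_bulge / (F_bulge + F_disk) = 1 / (1 + 10**(-0.4 * (m_disk - m_bulge))).
A small standalone sketch with made-up magnitudes:

def bulge_to_total(m_bulge, m_disk):
    """Bulge-to-total light ratio from bulge and disk apparent magnitudes."""
    return 1.0 / (1.0 + 10.0**(-0.4 * (m_disk - m_bulge)))


print(bulge_to_total(14.0, 13.0))   # disk brighter, so B/T < 0.5
print(bulge_to_total(13.0, 13.0))   # equal magnitudes give exactly 0.5
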
Example #31
def do_cleanup(catalog):
    """Cleanup catalog after importing all data."""
    task_str = catalog.get_current_task_str()

    # Set preferred names, calculate some columns based on imported data,
    # sanitize some fields
    keys = list(catalog.entries.keys())

    cleanupcnt = 0
    for oname in pbar(keys, task_str):
        # Some events may be merged in cleanup process, skip them if
        # non-existent.
        try:
            name = catalog.add_entry(oname)
        except Exception:
            catalog.log.warning(
                '"{}" was not found, suggests merge occurred in cleanup '
                'process.'.format(oname))
            continue

        # Set the preferred name, switching to that name if name changed.
        name = catalog.entries[name].set_preferred_name()

        aliases = catalog.entries[name].get_aliases()
        catalog.entries[name].purge_bandless_photometry()
        catalog.entries[name].set_first_max_light()

        if SUPERNOVA.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['MLS', 'SSS', 'CSS', 'GRB ']
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix)
                            and is_number(alias.replace(prefix, '')[:2])):
                        discoverdate = ('/'.join([
                            '20' + alias.replace(prefix, '')[:2],
                            alias.replace(prefix, '')[2:4],
                            alias.replace(prefix, '')[4:6]
                        ]))
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if SUPERNOVA.DISCOVER_DATE in catalog.entries[name]:
                    break
        if SUPERNOVA.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = [
                'ASASSN-', 'PS1-', 'PS1', 'PS', 'iPTF', 'PTF', 'SCP-', 'SNLS-',
                'SPIRITS', 'LSQ', 'DES', 'SNHiTS', 'Gaia', 'GND', 'GNW', 'GSD',
                'GSW', 'EGS', 'COS', 'OGLE', 'HST'
            ]
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix)
                            and is_number(alias.replace(prefix, '')[:2])
                            and is_number(alias.replace(prefix, '')[:1])):
                        discoverdate = '20' + alias.replace(prefix, '')[:2]
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if SUPERNOVA.DISCOVER_DATE in catalog.entries[name]:
                    break
        if SUPERNOVA.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['SNF']
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix)
                            and is_number(alias.replace(prefix, '')[:4])):
                        discoverdate = ('/'.join([
                            alias.replace(prefix, '')[:4],
                            alias.replace(prefix, '')[4:6],
                            alias.replace(prefix, '')[6:8]
                        ]))
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if SUPERNOVA.DISCOVER_DATE in catalog.entries[name]:
                    break
        if SUPERNOVA.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['PTFS', 'SNSDF']
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix)
                            and is_number(alias.replace(prefix, '')[:2])):
                        discoverdate = ('/'.join([
                            '20' + alias.replace(prefix, '')[:2],
                            alias.replace(prefix, '')[2:4]
                        ]))
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if SUPERNOVA.DISCOVER_DATE in catalog.entries[name]:
                    break
        if SUPERNOVA.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['AT', 'SN', 'OGLE-', 'SM ', 'KSN']
            for alias in aliases:
                for prefix in prefixes:
                    if alias.startswith(prefix):
                        year = re.findall(r'\d+', alias)
                        if len(year) == 1:
                            year = year[0]
                        else:
                            continue
                        if alias.replace(prefix, '').index(year) != 0:
                            continue
                        if (year and is_number(year) and '.' not in year
                                and len(year) <= 4):
                            discoverdate = year
                            if catalog.args.verbose:
                                tprint('Added discoverdate from name [' +
                                       alias + ']: ' + discoverdate)
                            source = catalog.entries[name].add_self_source()
                            catalog.entries[name].add_quantity(
                                SUPERNOVA.DISCOVER_DATE,
                                discoverdate,
                                source,
                                derived=True)
                            break
                if SUPERNOVA.DISCOVER_DATE in catalog.entries[name]:
                    break

        if (SUPERNOVA.RA not in catalog.entries[name]
                or SUPERNOVA.DEC not in catalog.entries[name]):
            prefixes = [
                'PSN J', 'MASJ', 'CSS', 'SSS', 'MASTER OT J', 'HST J', 'TCP J',
                'MACS J', '2MASS J', 'EQ J', 'CRTS J', 'SMT J'
            ]
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix)
                            and is_number(alias.replace(prefix, '')[:6])):
                        noprefix = alias.split(':')[-1].replace(prefix,
                                                                '').replace(
                                                                    '.', '')
                        decsign = '+' if '+' in noprefix else '-'
                        noprefix = noprefix.replace('+', '|').replace('-', '|')
                        nops = noprefix.split('|')
                        if len(nops) < 2:
                            continue
                        rastr = nops[0]
                        decstr = nops[1]
                        ra = ':'.join([rastr[:2], rastr[2:4], rastr[4:6]]) + \
                            ('.' + rastr[6:] if len(rastr) > 6 else '')
                        dec = (
                            decsign +
                            ':'.join([decstr[:2], decstr[2:4], decstr[4:6]]) +
                            ('.' + decstr[6:] if len(decstr) > 6 else ''))
                        if catalog.args.verbose:
                            tprint('Added ra/dec from name: ' + ra + ' ' + dec)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(SUPERNOVA.RA,
                                                           ra,
                                                           source,
                                                           derived=True)
                        catalog.entries[name].add_quantity(SUPERNOVA.DEC,
                                                           dec,
                                                           source,
                                                           derived=True)
                        break
                if SUPERNOVA.RA in catalog.entries[name]:
                    break

        no_host = (SUPERNOVA.HOST not in catalog.entries[name] or not any([
            x[QUANTITY.VALUE] == 'Milky Way'
            for x in catalog.entries[name][SUPERNOVA.HOST]
        ]))
        if (SUPERNOVA.RA in catalog.entries[name]
                and SUPERNOVA.DEC in catalog.entries[name] and no_host):
            from astroquery.irsa_dust import IrsaDust
            if name not in catalog.extinctions_dict:
                try:
                    ra_dec = catalog.entries[name][
                        SUPERNOVA.RA][0][QUANTITY.VALUE] + \
                        " " + \
                        catalog.entries[name][SUPERNOVA.DEC][0][QUANTITY.VALUE]
                    result = IrsaDust.get_query_table(ra_dec, section='ebv')
                except (KeyboardInterrupt, SystemExit):
                    raise
                except Exception:
                    warnings.warn("Coordinate lookup for " + name +
                                  " failed in IRSA.")
                else:
                    ebv = result['ext SandF mean'][0]
                    ebverr = result['ext SandF std'][0]
                    catalog.extinctions_dict[name] = [ebv, ebverr]
            if name in catalog.extinctions_dict:
                sources = uniq_cdl([
                    catalog.entries[name].add_self_source(),
                    catalog.entries[name].add_source(
                        bibcode='2011ApJ...737..103S')
                ])
                (catalog.entries[name].add_quantity(
                    SUPERNOVA.EBV,
                    str(catalog.extinctions_dict[name][0]),
                    sources,
                    e_value=str(catalog.extinctions_dict[name][1]),
                    derived=True))
        if ((SUPERNOVA.HOST in catalog.entries[name]
             and (SUPERNOVA.HOST_RA not in catalog.entries[name]
                  or SUPERNOVA.HOST_DEC not in catalog.entries[name]))):
            for host in catalog.entries[name][SUPERNOVA.HOST]:
                alias = host[QUANTITY.VALUE]
                if ' J' in alias and is_number(alias.split(' J')[-1][:6]):
                    noprefix = alias.split(' J')[-1].split(':')[-1].replace(
                        '.', '')
                    decsign = '+' if '+' in noprefix else '-'
                    noprefix = noprefix.replace('+', '|').replace('-', '|')
                    nops = noprefix.split('|')
                    if len(nops) < 2:
                        continue
                    rastr = nops[0]
                    decstr = nops[1]
                    hostra = (':'.join([rastr[:2], rastr[2:4], rastr[4:6]]) +
                              ('.' + rastr[6:] if len(rastr) > 6 else ''))
                    hostdec = decsign + ':'.join([
                        decstr[:2], decstr[2:4], decstr[4:6]
                    ]) + ('.' + decstr[6:] if len(decstr) > 6 else '')
                    if catalog.args.verbose:
                        tprint('Added hostra/hostdec from name: ' + hostra +
                               ' ' + hostdec)
                    source = catalog.entries[name].add_self_source()
                    catalog.entries[name].add_quantity(SUPERNOVA.HOST_RA,
                                                       hostra,
                                                       source,
                                                       derived=True)
                    catalog.entries[name].add_quantity(SUPERNOVA.HOST_DEC,
                                                       hostdec,
                                                       source,
                                                       derived=True)
                    break
                if SUPERNOVA.HOST_RA in catalog.entries[name]:
                    break

        if (SUPERNOVA.REDSHIFT not in catalog.entries[name]
                and SUPERNOVA.VELOCITY in catalog.entries[name]):
            # Find the "best" velocity to use for this
            bestsig = 0
            for hv in catalog.entries[name][SUPERNOVA.VELOCITY]:
                sig = get_sig_digits(hv[QUANTITY.VALUE])
                if sig > bestsig:
                    besthv = hv[QUANTITY.VALUE]
                    bestsrc = hv['source']
                    bestsig = sig
            if bestsig > 0 and is_number(besthv):
                voc = float(besthv) * 1.e5 / CLIGHT
                source = catalog.entries[name].add_self_source()
                sources = uniq_cdl([source] + bestsrc.split(','))
                (catalog.entries[name].add_quantity(
                    SUPERNOVA.REDSHIFT,
                    pretty_num(sqrt((1. + voc) / (1. - voc)) - 1.,
                               sig=bestsig),
                    sources,
                    kind='heliocentric',
                    derived=True))
        if (SUPERNOVA.REDSHIFT not in catalog.entries[name]
                and len(catalog.nedd_dict) > 0
                and SUPERNOVA.HOST in catalog.entries[name]):
            reference = "NED-D"
            refurl = "http://ned.ipac.caltech.edu/Library/Distances/"
            refbib = "1991ASSL..171...89H"
            for host in catalog.entries[name][SUPERNOVA.HOST]:
                if host[QUANTITY.VALUE] in catalog.nedd_dict:
                    source = catalog.entries[name].add_source(
                        bibcode='2016A&A...594A..13P')
                    secondarysource = catalog.entries[name].add_source(
                        name=reference,
                        url=refurl,
                        bibcode=refbib,
                        secondary=True)
                    meddist = statistics.median(
                        catalog.nedd_dict[host[QUANTITY.VALUE]])
                    redz = z_at_value(cosmo.comoving_distance,
                                      float(meddist) * un.Mpc)
                    redshift = pretty_num(redz,
                                          sig=get_sig_digits(str(meddist)))
                    catalog.entries[name].add_quantity(
                        [SUPERNOVA.REDSHIFT, SUPERNOVA.HOST_REDSHIFT],
                        redshift,
                        uniq_cdl([source, secondarysource]),
                        kind='host',
                        derived=True)
        if (SUPERNOVA.MAX_ABS_MAG not in catalog.entries[name]
                and SUPERNOVA.MAX_APP_MAG in catalog.entries[name]
                and SUPERNOVA.LUM_DIST in catalog.entries[name]):
            # Find the "best" distance to use for this
            bestsig = 0
            for ld in catalog.entries[name][SUPERNOVA.LUM_DIST]:
                sig = get_sig_digits(ld[QUANTITY.VALUE])
                if sig > bestsig:
                    bestld = ld[QUANTITY.VALUE]
                    bestsrc = ld[QUANTITY.SOURCE]
                    bestsig = sig
            if bestsig > 0 and is_number(bestld) and float(bestld) > 0.:
                source = catalog.entries[name].add_self_source()
                sources = uniq_cdl([source] + bestsrc.split(','))
                bestldz = z_at_value(cosmo.luminosity_distance,
                                     float(bestld) * un.Mpc)
                pnum = (float(catalog.entries[name][SUPERNOVA.MAX_APP_MAG][0][
                    QUANTITY.VALUE]) - 5.0 *
                        (log10(float(bestld) * 1.0e6) - 1.0) +
                        2.5 * log10(1.0 + bestldz))
                pnum = pretty_num(pnum, sig=bestsig + 1)
                catalog.entries[name].add_quantity(SUPERNOVA.MAX_ABS_MAG,
                                                   pnum,
                                                   sources,
                                                   derived=True)
        if (SUPERNOVA.MAX_VISUAL_ABS_MAG not in catalog.entries[name]
                and SUPERNOVA.MAX_VISUAL_APP_MAG in catalog.entries[name]
                and SUPERNOVA.LUM_DIST in catalog.entries[name]):
            # Find the "best" distance to use for this
            bestsig = 0
            for ld in catalog.entries[name][SUPERNOVA.LUM_DIST]:
                sig = get_sig_digits(ld[QUANTITY.VALUE])
                if sig > bestsig:
                    bestld = ld[QUANTITY.VALUE]
                    bestsrc = ld[QUANTITY.SOURCE]
                    bestsig = sig
            if bestsig > 0 and is_number(bestld) and float(bestld) > 0.:
                source = catalog.entries[name].add_self_source()
                sources = uniq_cdl([source] + bestsrc.split(','))
                # Absolute magnitude from apparent magnitude and luminosity
                # distance; unlike MAX_ABS_MAG above, no 2.5*log10(1+z) term.
                pnum = (float(catalog.entries[name][
                    SUPERNOVA.MAX_VISUAL_APP_MAG][0][QUANTITY.VALUE]) - 5.0 *
                        (log10(float(bestld) * 1.0e6) - 1.0))
                pnum = pretty_num(pnum, sig=bestsig + 1)
                catalog.entries[name].add_quantity(
                    SUPERNOVA.MAX_VISUAL_ABS_MAG, pnum, sources, derived=True)
        if SUPERNOVA.REDSHIFT in catalog.entries[name]:
            # Find the "best" redshift to use for this
            bestz, bestkind, bestsig, bestsrc = catalog.entries[
                name].get_best_redshift()
            if bestsig > 0:
                try:
                    bestz = float(bestz)
                except Exception:
                    print(catalog.entries[name])
                    raise
                if SUPERNOVA.VELOCITY not in catalog.entries[name]:
                    source = catalog.entries[name].add_self_source()
                    # Relativistic Doppler relation: v = c * ((1 + z)**2 - 1) /
                    # ((1 + z)**2 + 1), with CLIGHT / KM giving c in km/s.
                    pnum = CLIGHT / KM * \
                        ((bestz + 1.)**2. - 1.) / ((bestz + 1.)**2. + 1.)
                    pnum = pretty_num(pnum, sig=bestsig)
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.VELOCITY,
                        pnum,
                        source,
                        kind=(SUPERNOVA.VELOCITY.kind_preference[bestkind]
                              if bestkind else ''))
                if bestz > 0.:
                    if SUPERNOVA.LUM_DIST not in catalog.entries[name]:
                        dl = cosmo.luminosity_distance(bestz)
                        sources = [
                            catalog.entries[name].add_self_source(),
                            catalog.entries[name].add_source(
                                bibcode='2016A&A...594A..13P')
                        ]
                        sources = uniq_cdl(sources + bestsrc.split(','))
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.LUM_DIST,
                            pretty_num(dl.value, sig=bestsig + 1),
                            sources,
                            kind=(SUPERNOVA.LUM_DIST.kind_preference[bestkind]
                                  if bestkind else ''),
                            derived=True)
                        if (SUPERNOVA.MAX_ABS_MAG not in catalog.entries[name]
                                and SUPERNOVA.MAX_APP_MAG
                                in catalog.entries[name]):
                            source = catalog.entries[name].add_self_source()
                            pnum = pretty_num(
                                float(catalog.entries[name][
                                    SUPERNOVA.MAX_APP_MAG][0][QUANTITY.VALUE])
                                - 5.0 * (log10(dl.to('pc').value) - 1.0) +
                                2.5 * log10(1.0 + bestz),
                                sig=bestsig + 1)
                            catalog.entries[name].add_quantity(
                                SUPERNOVA.MAX_ABS_MAG,
                                pnum,
                                sources,
                                derived=True)
                        if (SUPERNOVA.MAX_VISUAL_ABS_MAG
                                not in catalog.entries[name]
                                and SUPERNOVA.MAX_VISUAL_APP_MAG
                                in catalog.entries[name]):
                            source = catalog.entries[name].add_self_source()
                            pnum = pretty_num(float(catalog.entries[name][
                                SUPERNOVA.MAX_VISUAL_APP_MAG][0][
                                    QUANTITY.VALUE]) - 5.0 *
                                              (log10(dl.to('pc').value) - 1.0),
                                              sig=bestsig + 1)
                            catalog.entries[name].add_quantity(
                                SUPERNOVA.MAX_VISUAL_ABS_MAG,
                                pnum,
                                sources,
                                derived=True)
                    if SUPERNOVA.COMOVING_DIST not in catalog.entries[name]:
                        cd = cosmo.comoving_distance(bestz)
                        sources = [
                            catalog.entries[name].add_self_source(),
                            catalog.entries[name].add_source(
                                bibcode='2016A&A...594A..13P')
                        ]
                        sources = uniq_cdl(sources + bestsrc.split(','))
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.COMOVING_DIST,
                            pretty_num(cd.value, sig=bestsig),
                            sources,
                            derived=True)
        if SUPERNOVA.HOST_REDSHIFT in catalog.entries[name]:
            # Find the "best" redshift to use for this
            bestz, bestkind, bestsig, bestsrc = catalog.entries[
                name].get_best_redshift(SUPERNOVA.HOST_REDSHIFT)
            if bestsig > 0:
                try:
                    bestz = float(bestz)
                except Exception:
                    print(catalog.entries[name])
                    raise
                if SUPERNOVA.HOST_VELOCITY not in catalog.entries[name]:
                    source = catalog.entries[name].add_self_source()
                    # Relativistic Doppler relation: v = c * ((1 + z)**2 - 1) /
                    # ((1 + z)**2 + 1), with CLIGHT / KM giving c in km/s.
                    pnum = CLIGHT / KM * \
                        ((bestz + 1.)**2. - 1.) / ((bestz + 1.)**2. + 1.)
                    pnum = pretty_num(pnum, sig=bestsig)
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.HOST_VELOCITY,
                        pnum,
                        source,
                        kind=(SUPERNOVA.HOST_VELOCITY.kind_preference[bestkind]
                              if bestkind else ''))
                if bestz > 0.:
                    if SUPERNOVA.HOST_LUM_DIST not in catalog.entries[name]:
                        dl = cosmo.luminosity_distance(bestz)
                        sources = [
                            catalog.entries[name].add_self_source(),
                            catalog.entries[name].add_source(
                                bibcode='2016A&A...594A..13P')
                        ]
                        sources = uniq_cdl(sources + bestsrc.split(','))
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.HOST_LUM_DIST,
                            pretty_num(dl.value, sig=bestsig + 1),
                            sources,
                            kind=(SUPERNOVA.HOST_LUM_DIST.
                                  kind_preference[bestkind]
                                  if bestkind else ''),
                            derived=True)
                    if SUPERNOVA.HOST_COMOVING_DIST not in catalog.entries[
                            name]:
                        cd = cosmo.comoving_distance(bestz)
                        sources = [
                            catalog.entries[name].add_self_source(),
                            catalog.entries[name].add_source(
                                bibcode='2016A&A...594A..13P')
                        ]
                        sources = uniq_cdl(sources + bestsrc.split(','))
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.HOST_COMOVING_DIST,
                            pretty_num(cd.value, sig=bestsig),
                            sources,
                            derived=True)
        if all([
                x in catalog.entries[name] for x in [
                    SUPERNOVA.RA, SUPERNOVA.DEC, SUPERNOVA.HOST_RA,
                    SUPERNOVA.HOST_DEC
                ]
        ]):
            # For now just using first coordinates that appear in entry
            try:
                c1 = coord(
                    ra=catalog.entries[name][SUPERNOVA.RA][0][QUANTITY.VALUE],
                    dec=catalog.entries[name][SUPERNOVA.DEC][0][
                        QUANTITY.VALUE],
                    unit=(un.hourangle, un.deg))
                c2 = coord(ra=catalog.entries[name][SUPERNOVA.HOST_RA][0][
                    QUANTITY.VALUE],
                           dec=catalog.entries[name][SUPERNOVA.HOST_DEC][0][
                               QUANTITY.VALUE],
                           unit=(un.hourangle, un.deg))
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception:
                pass
            else:
                sources = uniq_cdl([catalog.entries[name].add_self_source()] +
                                   catalog.entries[name][SUPERNOVA.RA][0][
                                       QUANTITY.SOURCE].split(',') +
                                   catalog.entries[name][SUPERNOVA.DEC][0][
                                       QUANTITY.SOURCE].split(',') +
                                   catalog.entries[name][SUPERNOVA.HOST_RA][0][
                                       QUANTITY.SOURCE].split(',') +
                                   catalog.entries[name][SUPERNOVA.HOST_DEC][0]
                                   [QUANTITY.SOURCE].split(','))
                if SUPERNOVA.HOST_OFFSET_ANG not in catalog.entries[name]:
                    hosa = Decimal(c1.separation(c2).arcsecond)
                    hosa = pretty_num(hosa)
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.HOST_OFFSET_ANG,
                        hosa,
                        sources,
                        derived=True,
                        u_value='arcseconds')
                if (SUPERNOVA.COMOVING_DIST in catalog.entries[name]
                        and SUPERNOVA.REDSHIFT in catalog.entries[name]
                        and SUPERNOVA.HOST_OFFSET_DIST
                        not in catalog.entries[name]):
                    offsetsig = get_sig_digits(catalog.entries[name][
                        SUPERNOVA.HOST_OFFSET_ANG][0][QUANTITY.VALUE])
                    sources = uniq_cdl(
                        sources.split(',') +
                        (catalog.entries[name][SUPERNOVA.COMOVING_DIST][0][
                            QUANTITY.SOURCE]).split(',') +
                        (catalog.entries[name][SUPERNOVA.REDSHIFT][0][
                            QUANTITY.SOURCE]).split(','))
                    (catalog.entries[name].add_quantity(
                        SUPERNOVA.HOST_OFFSET_DIST,
                        pretty_num(
                            float(catalog.entries[name][
                                SUPERNOVA.HOST_OFFSET_ANG][0][QUANTITY.VALUE])
                            / 3600. * (pi / 180.) *
                            float(catalog.entries[name][
                                SUPERNOVA.COMOVING_DIST][0][QUANTITY.VALUE]) *
                            1000. / (1.0 + float(catalog.entries[name][
                                SUPERNOVA.REDSHIFT][0][QUANTITY.VALUE])),
                            sig=offsetsig), sources))

        catalog.entries[name].sanitize()
        catalog.journal_entries(bury=True, final=True, gz=True)
        cleanupcnt = cleanupcnt + 1
        if catalog.args.travis and cleanupcnt % 1000 == 0:
            break

    catalog.save_caches()

    return
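
A minimal sketch of the angular-to-physical host offset conversion used near
the end of both cleanup routines above: the projected offset in kpc is the
angle in radians times the comoving distance divided by (1 + z), i.e. the
angular-diameter distance. The example offset and redshift are illustrative.

from math import pi

from astropy.cosmology import Planck15 as cosmo


def host_offset_kpc(offset_arcsec, z):
    """Projected host offset in kpc from an angular offset and a redshift."""
    theta_rad = offset_arcsec / 3600. * (pi / 180.)
    d_c_mpc = cosmo.comoving_distance(z).value
    return theta_rad * d_c_mpc * 1000. / (1.0 + z)


print(host_offset_kpc(2.0, 0.05))   # a 2" offset at z = 0.05, in kpc
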
Example #32
""" compare light curves to PTF12gzk """

import numpy as np
import matplotlib.pyplot as plt
plt.rc("font", family="serif")
plt.rc("text", usetex=True)
from astropy.table import Table
from astropy.cosmology import Planck15


DIST = Planck15.luminosity_distance(z=0.0137).cgs.value


def optical():
    # PTF12gzk
    dat = Table.read("table1.dat", format='ascii')
    jd = dat['col1']
    mag = dat['col4']
    emag = dat['col5']
    band = dat['col3']
    Mag = mag-33.8 # distance modulus
    dt = jd-jd[0]

    choose = np.logical_or(band == 'r', band == 'R,r')
    plt.errorbar(dt[choose], Mag[choose], yerr=emag[choose], c='red', fmt='.')

    choose = (band == 'g')
    plt.errorbar(dt[choose], Mag[choose], yerr=emag[choose], c='green', fmt='.')

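A small sketch of how the hard-coded distance modulus above relates to a
luminosity distance: mu = 5 * log10(d_L / 10 pc). It uses the same z = 0.0137
as DIST; the exact value depends on the adopted distance, so it need not
equal 33.8.

import numpy as np
from astropy.cosmology import Planck15

d_L = Planck15.luminosity_distance(z=0.0137)
mu = 5 * np.log10(d_L.to('pc').value / 10.0)
print(mu)   # distance modulus implied by Planck15 at z = 0.0137
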
mu_mg = final_list.loc[:, 'MgII Mu Value from Gaussian Fitting'].values
mu_mg_er = final_list.loc[:, 'Error of MgII Mu from Gaussian Fitting'].values
mg_sig = final_list.loc[:, 'MgII Sigma Value from Gaussian Fitting'].values
mg_sig_er = final_list.loc[:,
                           'Error of MgII Sigma from Gaussian Fitting'].values
mg_k = final_list.loc[:, 'MgII K Value from Gaussian Fitting'].values
mg_k_er = final_list.loc[:, 'Error of MgII K from Gaussian Fitting'].values

name = (final_list.loc[:, 'Name'].values)
mjd = (final_list.loc[:, 'MJD'].values)
fiberid = (final_list.loc[:, 'Fiber ID'].values)
plate = (final_list.loc[:, 'Plate'].values)

C4_wav = np.linspace(1500, 1600, 1000)
Mg_wav = np.linspace(2750, 2850, 1000)
d = p15.luminosity_distance(redshift).to('cm').value


def FWHM(x, y, center):
    y_hm = y.max(axis=1, keepdims=True) / 2  # half-maximum per row
    hm_ix = abs(y - y_hm).argmin(axis=1)
    fwhm = 2 * abs(center - x[hm_ix])
    return A_to_kms(fwhm, center)


def A_to_kms(fwhm, m):
    return c * fwhm / m


def line_luminosity(wav, flux, dist):
    lum = 1e-17 * 4 * np.pi * dist**2 * flux
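
For the Gaussian line parameters loaded above (mu, sigma, k), the FWHM has the
closed form 2 * sqrt(2 ln 2) * sigma, so the numerical FWHM() helper can be
cross-checked analytically. A small standalone sketch; the value of c is
assumed here to be in km/s, and the line parameters are illustrative.

import numpy as np

c_kms = 2.998e5  # assumed speed of light in km/s


def gaussian_fwhm_kms(sigma_angstrom, center_angstrom):
    """FWHM of a Gaussian line in km/s: 2*sqrt(2 ln 2)*sigma * c / lambda_0."""
    fwhm_angstrom = 2.0 * np.sqrt(2.0 * np.log(2.0)) * sigma_angstrom
    return c_kms * fwhm_angstrom / center_angstrom


print(gaussian_fwhm_kms(5.0, 2800.0))   # a MgII-like line near 2800 A
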
Example #34
        maglims = tab['maglimit'].values
        if len(maglims) == 0:
            print(field, fn[ii])
        else:
            medlim = np.median(tab['maglimit'].values)
            print(str(ii) + "," + str(medlim))
            outputf.write(str(medlim) + '\n')
    outputf.close()


if __name__ == "__main__":
    #zquery = query.ZTFQuery()
    #get_seeing(zquery)

    # generate a grid of limiting magnitude and corresponding redshift
    maglim_grid = np.arange(16, 24, 1)
    z = np.array([0.035, 0.0546, 0.0847, 0.1302, 0.198, 0.298, 0.441, 0.647])
    # this is linear in the log(z) vs. limiting magnitude space

    maglim = np.loadtxt("seeing.txt", delimiter=',')[:, 1]

    # for each limiting magnitude, you have an effective volume for a transient
    # with a light curve like AT2018cow. Let's say that you would need to get
    # to an absolute magnitude of -20 (see the sketch after this block).
    zlim = np.zeros(len(maglim))
    dlim = np.zeros(len(maglim))
    for ii, lim in enumerate(maglim):
        zval = 10**(np.interp(lim, maglim_grid, np.log10(z)))
        zlim[ii] = zval
        dlim[ii] = Planck15.luminosity_distance(zval).value
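
A sketch of how a (limiting magnitude, redshift) grid like the hard-coded one
above can be generated: for an assumed peak absolute magnitude M (here -20, as
stated in the comment), the horizon redshift solves m_lim = M + distmod(z),
inverted here with astropy's z_at_value.

import numpy as np
from astropy import units as u
from astropy.cosmology import Planck15, z_at_value

M_abs = -20.0
maglim_grid = np.arange(16, 24, 1)
z_grid = np.array([
    z_at_value(Planck15.distmod, (m - M_abs) * u.mag) for m in maglim_grid
])
print(z_grid)   # comparable to the hard-coded z array above
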
def snr_distr_z(rho, z1, z2, z0, rhob0, mass1, mass2, ifo=['H1'], \
                  spline=None, power=4, renorm=True, universe='lambdacdm', mass_distr='fixed_source'):
    """
    Binned in distance snr distribution, including reduction of SNR from
    geometric effects. Exists only between z1 and z2, with a fiducial SNR rho0
    at r0.

    The default behavior is to assume a LambdaCDM universe described by
    Planck15 (universe='lambdacdm'). Setting universe='euclidean' will turn off
    redshifting, and treat z{0,1,2} as *distances* in Mpc.

    Evaluated at detected SNR rho.
    """

    mbid = (mass1, mass2, z1, z2, mass_distr)

    aligo = Network()
    aligo.add_psd('H1')

    if universe == "lambdacdm":
        r0, r1, r2 = Planck15.luminosity_distance((z0, z1, z2)).value

    elif universe == "euclidean":
        r0, r1, r2 = z0, z1, z2
        # Explicitly zero out the redshifts
        z0, z1, z2 = 0., 0., 0.
    else:
        raise ValueError(
            "Universe must be one of (lambdacdm, euclidean), got {0}".format(
                universe))
    # Cache the computationally difficult stuff so normalization calls are quick
    if mbid in _snr_cache:
        rhob1, rhob2 = _snr_cache[mbid]
    else:
        # Set the masses
        if mass_distr == 'fixed_source':
            m1, m2 = mass1 * (1 + z1), mass2 * (1 + z1)
        else:
            m1, m2 = mass1, mass2

        if r1 == 0.:
            rhob1 = np.inf
        else:
            tmplt = eventgen.Event.event_template(mass1=m1, mass2=m2, \
                                                    distance=r1)
            rhob1 = aligo.snr_at_ifo(tmplt, optimal=True, ifo="H1")

        tmplt = eventgen.Event.event_template(mass1=m1, mass2=m2, distance=r2)
        rhob2 = aligo.snr_at_ifo(tmplt, optimal=True, ifo="H1")

        _snr_cache[mbid] = rhob1, rhob2

    # Clip to minimum snr threshold
    rhob2 = max(rhob2, rhob0)

    if spline is None:
        exponent = power - 1
        # Set normalization in front of integral
        norm = exponent * (rhob0 * r0)**(exponent) / (r2**(exponent) -
                                                      r1**(exponent))

        # Clip limits for the allowed amplitude range
        a, b = max(0, rho / rhob1), min(1, rho / rhob2)
        if a >= b:
            return 0.

        try:
            if power in _amp_mom_cache:
                amplitude_moment = _amp_mom_cache[power]
            else:
                amplitude_moment = intr_beta_moment(_beta_params,
                                                    kde,
                                                    m=exponent)
                _amp_mom_cache[power] = amplitude_moment

            intg = amplitude_moment(b) - amplitude_moment(a)

        except Exception as e:
            print(rho / rhob2, rho / rhob1)
            raise e

        return norm * intg * rho**(-power)

    else:

        def _int_fg(alpha):
            return spline(rho / alpha) / alpha * kde(alpha)

        # Clip limits for the allowed amplitude range
        a, b = max(0, rho / rhob1), min(1, rho / rhob2)
        if a >= b:
            return 0.
        return fixed_quad(_int_fg, a, b)[0]
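
A simplified, self-contained sketch of the geometric piece of the distribution
above, in the Euclidean limit and without the amplitude/antenna factors: if
sources are uniform in volume between r1 and r2 and the optimal SNR scales as
rho = rhob0 * r0 / r, then p(rho) is proportional to rho**(-power) between
rho(r2) and rho(r1), with the same normalization as the `norm` line above. The
numbers are illustrative.

import numpy as np


def euclidean_snr_pdf(rho, rhob0, r0, r1, r2, power=4):
    """p(rho) for uniform-in-volume sources with rho = rhob0 * r0 / r."""
    exponent = power - 1
    norm = exponent * (rhob0 * r0)**exponent / (r2**exponent - r1**exponent)
    rho_lo, rho_hi = rhob0 * r0 / r2, rhob0 * r0 / r1
    return np.where((rho >= rho_lo) & (rho <= rho_hi),
                    norm * rho**(-float(power)), 0.0)


rho = np.linspace(1.0, 100.0, 20000)
pdf = euclidean_snr_pdf(rho, rhob0=8.0, r0=100.0, r1=50.0, r2=400.0)
print(np.trapz(pdf, rho))   # close to 1: normalized over the allowed range
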
prior_moc = pymoc.util.catalog.catalog_to_moc(c, 60, 15)

radio_in_moc = inMoc(ra,dec,prior_moc)
#if radio_in_moc==False:
#    sys.exit()

lofar_coords = SkyCoord(ra,dec,unit='deg')
help_coords = SkyCoord(help_masterlist['ra'],help_masterlist['dec'],unit='deg')
radius = 2
idx_help, idx_lofar, d2d, d3d = lofar_coords.search_around_sky(
    help_coords, radius*u.arcsec)

# Compute the predicted flux from the dust and use this to construct the
# prior.
from astropy.cosmology import Planck15 as cosmo
from astropy import units as u
f_pred = help_masterlist_ldust['bayes.dust.luminosity'] / (
    4 * np.pi * cosmo.luminosity_distance(
        help_masterlist_ldust['best.universe.redshift']).to(u.cm))
mask = np.isfinite(f_pred)
ldust_mask = (np.log10(f_pred)>8.5) & (np.isfinite(f_pred))
mips_mask = (help_masterlist['flag_optnir_det']>=5) & (help_masterlist['f_mips_24']>20)

prior_cat = help_masterlist_ldust[ldust_mask | mips_mask]

# If there is only one crossmatch within the search radius, match them when
# the source is in the prior list; otherwise add the ra and dec of the LOFAR
# optical counterpart to the prior list (a small counting sketch follows).
XID_rerun = []
source_type = []
mask = ldust_mask[idx_help] | mips_mask[idx_help]
idx_true = idx_help[mask]
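
The comment above branches on how many HELP sources fall within the 2" radius
of each LOFAR source. A small self-contained sketch (with made-up coordinates)
of counting crossmatches per LOFAR source from the search_around_sky output:

import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord

lofar_demo = SkyCoord([150.000, 150.010] * u.deg, [2.000, 2.000] * u.deg)
help_demo = SkyCoord([150.0001, 150.0100, 150.0101] * u.deg,
                     [2.0000, 2.0001, 2.0000] * u.deg)
idx_h, idx_l, d2d, d3d = lofar_demo.search_around_sky(help_demo, 2 * u.arcsec)

# idx_l has one entry per matched pair, so the number of crossmatches for
# LOFAR source i is the number of times i appears in it.
n_match = np.bincount(idx_l, minlength=len(lofar_demo))
print(n_match)   # here [1 2]: the first source has a unique match
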
Example #37
def LISASNR_AET(M,
                q,
                chi1,
                chi2,
                z,
                phi,
                inc,
                lambd,
                beta,
                psi,
                tobs=5.,
                minf=1e-5,
                maxf=1.,
                t0=0.,
                fRef=0.,
                npts=10000,
                variant='LISAproposal',
                L=2.5e9,
                WDbackground=False,
                WDduration=None):
    zval = z
    Mval = M
    qval = q

    # Masses
    m1 = Mval * qval / (1. + qval)
    m2 = Mval * 1 / (1. + qval)

    dist = cosmo.luminosity_distance(zval).value

    wftdi_rescaled = pyFDresponse.LISAGenerateTDI(phi,
                                                  fRef,
                                                  m1,
                                                  m2,
                                                  chi1,
                                                  chi2,
                                                  dist,
                                                  inc,
                                                  lambd,
                                                  beta,
                                                  psi,
                                                  tobs=tobs,
                                                  minf=minf,
                                                  maxf=maxf,
                                                  t0=t0,
                                                  settRefAtfRef=False,
                                                  tRef=0.,
                                                  TDItag='TDIAET',
                                                  order_fresnel_stencil=0,
                                                  nptmin=100,
                                                  rescaled=True,
                                                  L=L)

    tf = 1. / (2 * np.pi) * gwtools.spline(wftdi_rescaled['freq'],
                                           wftdi_rescaled['phase'])(
                                               wftdi_rescaled['freq'], 1)

    return LISAComputeSNR_TDIAET(wftdi_rescaled,
                                 df=None,
                                 cumul=False,
                                 npts=npts,
                                 variant=variant,
                                 L=L,
                                 WDbackground=WDbackground,
                                 WDduration=WDduration)
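
The tf array above implements the stationary-phase time-frequency relation
t(f) = (1 / 2 pi) dPhi/df, evaluated as the first derivative of a spline fit
to the Fourier-domain phase. A small self-contained sketch of the same
operation using scipy; gwtools.spline is assumed to behave similarly, and the
toy phase model is made up.

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

freq = np.linspace(1e-4, 1e-1, 1000)
phase = -2.0 * np.pi * 1.0e4 * freq + 0.1 * freq**2   # toy phase model
tf = 1. / (2 * np.pi) * InterpolatedUnivariateSpline(freq, phase)(freq, 1)
# For this toy phase dPhi/df is ~ -2*pi*1e4, so tf is ~ -1e4 s everywhere.
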
Example #38
# In[8]:

#photoz

# ## Join CIGALE and photoz tables

# In[9]:

prior = join(cigale, photoz, keys='help_id')

# In[10]:

from astropy.cosmology import Planck15 as cosmo
from astropy import units as u
f_pred = prior['bayes.dust.luminosity'] / (
    4 * np.pi * cosmo.luminosity_distance(prior['z1_median']).to(u.cm))

# In[11]:

finite = np.isfinite(f_pred.value)
prior = prior[finite][np.log10(f_pred.value[finite]) > 8.5]

# In[12]:

prior['DEC'].name = 'Dec'

# ## Read in Maps

# In[13]:

im100fits = '../../dmu18/dmu18_HELP-PACS-maps/data/GAMA-09_PACS100_v0.9.fits'
Example #39
def LISAtimetomergerofSNR(SNR,
                          M,
                          q,
                          chi1,
                          chi2,
                          z,
                          phi,
                          inc,
                          lambd,
                          beta,
                          psi,
                          tobs=5.,
                          minf=1e-5,
                          maxf=1.,
                          t0=0.,
                          fRef=0.,
                          npts=4000,
                          variant='LISAproposal',
                          L=2.5e9,
                          WDbackground=False,
                          WDduration=None):
    zval = z
    Mval = M
    qval = q

    # Masses
    m1 = Mval * qval / (1. + qval)
    m2 = Mval * 1 / (1. + qval)

    dist = cosmo.luminosity_distance(zval).value

    wftdi_rescaled = pyFDresponse.LISAGenerateTDI(phi,
                                                  fRef,
                                                  m1,
                                                  m2,
                                                  chi1,
                                                  chi2,
                                                  dist,
                                                  inc,
                                                  lambd,
                                                  beta,
                                                  psi,
                                                  tobs=tobs,
                                                  minf=minf,
                                                  maxf=maxf,
                                                  t0=t0,
                                                  settRefAtfRef=False,
                                                  tRef=0.,
                                                  TDItag='TDIAET',
                                                  order_fresnel_stencil=0,
                                                  nptmin=100,
                                                  rescaled=True,
                                                  L=L)

    tf = 1. / (2 * np.pi) * gwtools.spline(wftdi_rescaled['freq'],
                                           wftdi_rescaled['phase'])(
                                               wftdi_rescaled['freq'], 1)

    cumul_SNR = LISAComputeSNR_TDIAET(wftdi_rescaled,
                                      df=None,
                                      cumul=True,
                                      npts=npts,
                                      variant=variant,
                                      L=L,
                                      WDbackground=WDbackground,
                                      WDduration=WDduration)[0]

    # Cut freq at first max of tf
    if not np.any(np.diff(tf) <= 0):
        ilast_tf = len(tf) - 1
    else:
        ilast_tf = np.where(np.logical_not(np.diff(tf) > 0))[0][0]
    last_tf = tf[ilast_tf]
    margin = 1.  # Margin for representing ln(tflast - tf + margin)

    # Detectability threshold
    if not np.any(cumul_SNR > SNR):
        #print('Warning: cumul_SNR does not reach threshold.')
        tthreshold = np.nan
    else:
        if not np.any(cumul_SNR < SNR):
            #print('Warning: cumul_SNR exceeds threshold at first point ?')
            ithreshold = 0

        else:
            ithreshold = np.where(cumul_SNR < SNR)[0][-1]
        tthreshold = last_tf - tf[ithreshold] + margin

    return tthreshold
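
A minimal synthetic sketch of the detectability-threshold logic at the end of
the function above: given an accumulated SNR that grows along the frequency
grid, the time to merger at threshold is read off at the last index where the
accumulated SNR is still below the target value. All numbers are made up.

import numpy as np

cumul_SNR = np.array([1.0, 3.0, 6.0, 10.0, 15.0])   # toy accumulated SNR
tf = np.array([-1e6, -1e5, -1e4, -1e3, -1e2])       # toy t(f), increasing
SNR = 8.0
margin = 1.0

last_tf = tf[-1]
ithreshold = np.where(cumul_SNR < SNR)[0][-1]   # last point below threshold
tthreshold = last_tf - tf[ithreshold] + margin  # as in the code above
print(ithreshold, tthreshold)                   # 2 and roughly 9.9e3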