def findDist(self, **kwargs):
    """Return the luminosity distance for this object's redshift."""
    try:
        if kwargs.get('calc', False):
            # c = 299792.458 * u.km / u.second
            dh = const.c / cosmo.H(0)  # Hubble distance
            om = 0.27
            ok = 1 - om
            oa = 0
            # E = lambda x: sqrt(om*(1+x)**3 + ok*(1+x)**2 + oa)
            # integrate.quad returns (value, abserr); keep only the value
            dc = dh * integrate.quad(
                lambda x: 1 / (sqrt(om * (1 + x)**3 + ok * (1 + x)**2 + oa)),
                0, self.z)[0]
            # transverse comoving distance for an open universe (sinh written via exponentials)
            dm = dh * (1 / sqrt(ok)) * (
                (np.exp(sqrt(ok) * (dc / dh)) - np.exp(-sqrt(ok) * (dc / dh))) / 2)
            dl = dm * (1 + self.z)
            return dl
        else:
            a = cosmo.luminosity_distance(self.z)  # .value * u.Mpc
            return a
    except Exception:
        s3 = 'The findDist() function failed for plate-ifu: ' + self.s
        log.error(s3)
        print(s3)
        return 0.0
def optical_depth(z):
    """Analytic Thomson optical depth of a fully ionized IGM out to redshift z."""
    c = const.c.cgs
    H_0 = cosmo.H(0).cgs
    n_H0 = 1.9e-7 * uni.cm**(-3)    # present-day hydrogen number density
    sigma_T = 6.65e-25 * uni.cm**2  # Thomson cross section
    omega_m = 0.308
    omega_lam = 0.692
    return 2 * n_H0 * sigma_T * c / (3 * H_0 * omega_m) \
        * (np.sqrt(omega_m * (1 + z)**3 + omega_lam) - 1)
def findDist(self):
    dh = 299792.458 / cosmo.H(0)  # Hubble distance (c in km/s over H0)
    om = 0.27
    ok = 1 - om
    oa = 0
    # E = lambda x: sqrt(om*(1+x)**3 + ok*(1+x)**2 + oa)
    # integrate.quad returns (value, abserr); keep only the value
    dc = dh * integrate.quad(
        lambda x: 1 / (sqrt(om * (1 + x)**3 + ok * (1 + x)**2 + oa)),
        0, self.z)[0]
    dm = dh * (1 / sqrt(ok) * sinh(sqrt(ok) * (dc / dh)))  # open-universe transverse distance
    dl = dm * (1 + self.z)  # manual luminosity distance (computed but not returned)
    a = cosmo.luminosity_distance(self.z).value
    return a
def convert_vel_dist(velocity, distance_unit, time_unit):
    """Takes as input a recession velocity in a given unit and converts it to a
    distance using Hubble's law with H(0) from astropy.cosmology.
    """
    velocity_unit = distance_unit / time_unit
    # making the velocity magnitude into a quantity
    velocity = make_quantity(velocity, velocity_unit)
    # obtaining the distance from Hubble's law with H(0), the present-day value of H
    distance = velocity / cosmo.H(0)
    # splitting the quantity into its value and unit for output
    distance_magnitude = distance.value
    distance_unit = str(distance.unit)
    return distance_magnitude, distance_unit
def blind_distances(final):
    '''
    Given an LSS catalog instance with Z filled, return the same instance
    with (blinded) cosmological distances.

    Parameters
    ----------
    final: :class:`astropy.table.Table`
        LSS catalog Table.

    Returns
    -------
    lsscatalog: :class:`astropy.table.Table`
        LSS catalog Table.
    '''
    from astropy.cosmology import WMAP9 as cosmo

    isin = final['Z'] > 0.
    final['COSMO_BLINDCHI'][isin] = (cosmo.H(0) / 100.) * cosmo.comoving_distance(final['Z'][isin])
    return final
import numpy as np
from numpy import loadtxt
import matplotlib.pyplot as plt
import math
from astropy.cosmology import WMAP9 as cosmo
from astropy import constants as c
from astropy import units as u
from astropy.visualization import quantity_support
import scipy.integrate as integrate
from pynverse import inversefunc

# ------------------- PARAMETERS ---------------- #
# the speed of light
c = (c.c).to(u.centimeter / u.second)
# pi
Pi = math.pi
# Hubble constant converted to 1/s
H = cosmo.H(0).to(u.centimeter / (u.centimeter * u.second))
# flux limit given by the paper
FluxLimit = (2.0 * 10**-8) * u.erg / (u.centimeter**2 * u.second)
Omiga = 0.27
X = np.arange(0.1, 10, 0.01)
bound = 0.757

# -------------------- LOADING DATA ------------- #
x = loadtxt('Desktop/swift_DATA_Yu.txt', unpack=True, usecols=[0])
y = np.log10(loadtxt('Desktop/swift_DATA_Yu.txt', unpack=True, usecols=[1]))
y1 = loadtxt('Desktop/swift_DATA_Yu.txt', unpack=True, usecols=[1])
Eta = []
k = np.arange(0, 3.5, 0.05)
for i in range(len(k)):
    y2 = loadtxt('Desktop/swift_DATA_Yu.txt', unpack=True,
""" PHY2071: Introduction to Astonomy Comp. Project E. Schoenrock (c)2016 Script to solve friedmann equations in a FLAT UNIVERSE (K=0). Comparison between matter-dominated scenario and radiation-dominated scenario """ import numpy as np import matplotlib.pyplot as plt import scipy.constants as s from scipy.integrate import odeint from astropy.cosmology import WMAP9 as cosmo p_crit = np.square( cosmo.H(0) / 100) * 1.87e-29 #critical density of the universe at present const = (8 * s.pi * s.G) / 3 #Define this as a constant, makes life easier ##Matter Approximation Function def flat_universe(y, t): dy0 = np.sqrt(const * y[1] * np.square(y[0])) #friedmann eqn dy1 = -((3 * y[1]) * dy0) / y[0] #fluid eqn return [dy0, dy1] ##Radiation Approximation Function def flat_universe_rad(x, t): dx0 = np.sqrt(const * x[1] * np.square(x[0])) #friedmann eqn dx1 = -((4 * x[1]) * dx0) / x[0] #fluid eqn return [dx0, dx1]
def comov_vol(z, delta_z, omega_pix):
    return (omega_pix * ((cosmo.angular_diameter_distance(z).value)**2) *
            (sc.c / cosmo.H(z).value) * delta_z)
    ### how to deal with delta z...
def comov_vol(z, delta_z, omega_pix):
    return (omega_pix * ((cosmo.angular_diameter_distance(z).value)**2) *
            (sc.c / cosmo.H(0).value) *
            (1 / np.sqrt((0.31 * ((1 + z)**3)) + (0.69))) *
            ((1 + z)**2) * delta_z)
    ### how to deal with delta z...
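One possible way to handle the "how to deal with delta z" question, sketched here under the assumption that `cosmo` is an astropy cosmology object and `omega_pix` is a pixel solid angle in steradians: integrate astropy's differential comoving volume across the redshift bin instead of using a single-redshift approximation.

from scipy.integrate import quad


def comov_vol_binned(z, delta_z, omega_pix):
    # dV/dz per steradian from astropy (Mpc^3/sr), integrated across the bin
    integrand = lambda zz: cosmo.differential_comoving_volume(zz).value
    vol_per_sr, _ = quad(integrand, z - delta_z / 2.0, z + delta_z / 2.0)
    return omega_pix * vol_per_sr  # Mpc^3 for omega_pix in sr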
Given the above difference in the speeds of the proton and light, what will be the separation between this high-energy proton and a photon after:

1. one year?
2. a Hubble time?

---

The Hubble time is roughly the age of the universe, ~13.8 billion years; its inverse gives the Hubble constant $H_0$.

# Table of c - v as a function of mass
cmv = c - v

# Calculate the separation over a year
dist = cmv * u.year
disty = dist.decompose()

# Calculate the Hubble time
t_h = 1 / cosmo.H(0).decompose()
print(f"Hubble constant = {cosmo.H(0):.2f}")
print(f"Hubble time = {t_h:.2e}")
print("-" * 60)

sep = cmv * t_h
seph = sep.decompose()

print(" mp \t\t c-v \t sep. (1yr) sep. (Hubble)")
print("-" * 60)
for i in range(0, len(cmv), 10):
    mpgev = (mpv * c * c).to(u.GeV)[i]
    print(f"{mpgev:8.2f} {cmv[i]:10.2} {disty[i]:10.2} {seph[i]:10.2}")

### Q 2.3

What will be the gamma factor and mass of the proton when the separation after a Hubble time equals the Compton length?
def simplify_catalog(mastercat, quickld=True):
    """
    Removes most of the unnecessary columns from the master catalog and joins
    fields where relevant

    Parameters
    ----------
    mastercat : astropy.table.Table
        The table from initial_catalog
    quickld : bool
        If True, means do the "quick" version of the luminosity distance
        calculation (takes <1 sec as opposed to a min or so, but is only good
        to a few kpc)
    """
    from astropy import table
    from astropy.constants import c

    ckps = c.to(u.km / u.s).value

    tab = table.Table()

    #RADEC:
    # use NSA unless it's missing, in which case use LEDA
    ras = mastercat['al2000'] * 15
    ras[~mastercat['RA'].mask] = mastercat['RA'][~mastercat['RA'].mask]
    decs = mastercat['de2000']
    decs[~mastercat['DEC'].mask] = mastercat['DEC'][~mastercat['DEC'].mask]

    tab.add_column(table.MaskedColumn(name='RA', data=ras, unit=u.deg))
    tab.add_column(table.MaskedColumn(name='Dec', data=decs, unit=u.deg))

    #Names/IDs:
    pgc = mastercat['pgc'].copy()
    pgc.mask = mastercat['pgc'] < 0
    tab.add_column(table.MaskedColumn(name='PGC#', data=pgc))
    tab.add_column(table.MaskedColumn(name='NSAID', data=mastercat['NSAID']))

    #do these in order of how 'preferred' the object name is.
    nameorder = ('Objname', 'Name_eddkk', 'objname', 'Name_2mass')  # this is: EDD, KK, LEDA, 2MASS
    #need to figure out which has the *largest* name strings, because we have a fixed number of characters
    largestdt = np.dtype('S1')
    for nm in nameorder:
        if mastercat.dtype[nm] > largestdt:
            largestdt = mastercat.dtype[nm]
            largestdtnm = nm
    names = mastercat[largestdtnm].copy()  # these will all be overwritten - just use it for shape
    for nm in nameorder:
        msk = ~mastercat[nm].mask
        names[msk] = mastercat[nm][msk]
    tab.add_column(table.MaskedColumn(name='othername', data=names))

    #After this, everything should have either an NSAID, a PGC#, or a name (or more than one)

    #VELOCITIES/redshifts
    #start with LEDA
    vs = mastercat['v'].astype(float)
    v_errs = mastercat['e_v'].astype(float)

    #Now add vhelio from the EDD
    eddvhel = mastercat['Vhel_eddkk']
    vs[~eddvhel.mask] = eddvhel[~eddvhel.mask]
    #EDD has no v-errors, so mask them
    v_errs[~eddvhel.mask] = 0
    v_errs.mask[~eddvhel.mask] = True

    #then the NSA *observed* velocity, if available (NOT the same as distance)
    vs[~mastercat['Z'].mask] = mastercat['Z'][~mastercat['Z'].mask] * ckps
    v_errs.mask[~mastercat['Z'].mask] = True
    #v_errs[~mastercat['Z_ERR'].mask] = mastercat['Z_ERR'][~mastercat['Z_ERR'].mask] * ckps

    #finally, KK when present, if it's not available from one of the above
    kkvh = mastercat['Vh']
    vs[~kkvh.mask] = kkvh[~kkvh.mask]
    #KK has no v-errors, so mask them
    v_errs[~kkvh.mask] = 0
    v_errs.mask[~kkvh.mask] = True

    #DISTANCES
    #start with all inf, and all masked
    dist = np.ones_like(mastercat['Dist_edd']) * np.inf
    dist.mask[:] = True

    #first populate those that are in EDD with CMD-based distance
    msk = mastercat['So_eddkk'] == 1
    dist[msk] = mastercat['Dist_edd'][msk]

    #now populate from the NSA if not in the above
    msk = (dist.mask) & (~mastercat['ZDIST'].mask)
    dist[msk] = mastercat['ZDIST'][msk] * ckps / WMAP9.H(0).value

    #finally, add in anything in the KK that's not elsewhere
    msk = (dist.mask) & (~mastercat['Dist_kk'].mask)
    dist[msk] = mastercat['Dist_kk'][msk]

    # #for those *without* EDD or KK, use the redshift's luminosity distance
    # premsk = dist.mask.copy()
    # zs = vs[premsk]/ckps
    # if quickld:
    #     ldx = np.linspace(zs.min(), zs.max(), 1000)
    #     ldy = WMAP9.luminosity_distance(ldx).to(u.Mpc).value
    #     ld = np.interp(zs, ldx, ldy)
    # else:
    #     ld = WMAP9.luminosity_distance(zs).to(u.Mpc).value
    # dist[premsk] = ld
    # dist.mask[premsk] = vs.mask[premsk]

    distmod = 5 * np.log10(dist) + 25  # used in phot section

    tab.add_column(table.MaskedColumn(name='vhelio', data=vs))
    #decided to remove v-errors
    #tab.add_column(table.MaskedColumn(name='vhelio_err', data=v_errs))
    tab.add_column(table.MaskedColumn(name='distance', data=dist, unit=u.Mpc))

    #PHOTOMETRY
    tab.add_column(
        table.MaskedColumn(name='r', data=mastercat['ABSMAG_r'] + distmod))
    tab.add_column(
        table.MaskedColumn(name='i', data=mastercat['ABSMAG_i'] + distmod))
    tab.add_column(
        table.MaskedColumn(name='z', data=mastercat['ABSMAG_z'] + distmod))
    tab.add_column(table.MaskedColumn(name='I', data=mastercat['it']))
    tab.add_column(table.MaskedColumn(name='K', data=mastercat['K_tc']))
    tab.add_column(table.MaskedColumn(name='K_err', data=mastercat['e_K']))

    #Stellar mass/SFR
    tab.add_column(
        table.MaskedColumn(name='M_star',
                           data=mastercat['MASS'] * (WMAP9.H(0).value / 100)**-2))
    tab.add_column(table.MaskedColumn(name='SFR_B300', data=mastercat['B300']))
    tab.add_column(
        table.MaskedColumn(name='SFR_B1000', data=mastercat['B1000']))

    return tab
import numpy as np
import matplotlib.pyplot as plt
from astropy.cosmology import WMAP9 as cosmo
import astropy.units as u
import astropy.constants as const

c = const.c.cgs                 # speed of light in cgs
H0 = cosmo.H(0).cgs             # Hubble constant in cgs
Sigma_T = 6.65e-25 * u.cm**2    # Thomson cross section of the electron [cm^2]
O_m = 0.308                     # energy density parameter for matter
O_lambda = 0.692                # energy density parameter for lambda


def tau_e(z):
    """
    Returns the optical depth for the ionized intergalactic medium as a
    function of redshift z.

    Parameters:
    -----------
    z: redshift at which the optical depth is to be calculated
    """
    _z = np.linspace(0, z, 100)  # new redshift array to define limits in the integral
    numerator = Sigma_T * 1.9e-7 * u.cm**-3 * (1 + _z)**2
    denominator = H0 * np.sqrt(O_m * (1 + _z)**3 + O_lambda)
    integral = c * numerator / denominator
    res = np.trapz(integral, _z)
    return res
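A quick usage sketch, not part of the original snippet: evaluate `tau_e` on a redshift grid and plot the resulting dimensionless optical depth.

# Evaluate tau_e over a range of redshifts; .decompose() reduces the
# Quantity to a plain dimensionless number before plotting.
zs = np.linspace(0.1, 10.0, 50)
taus = [tau_e(z).decompose().value for z in zs]

plt.plot(zs, taus)
plt.xlabel('z')
plt.ylabel(r'$\tau_e$')
plt.show()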
def lce_norm(ns):
    E = Omega_m * (1 + step2z[ns])**3 + (1 - Omega_m)
    f = (Omega_m * (1 + step2z[ns])**3 / E)**0.6
    H = cosmo.H(step2z[ns]).value
    k_unit = 2 * np.pi / boxlen
    return f * H / k_unit / (1 + step2z[ns])
def lce_norm():
    E = Omega_m * (1 + z)**3 + (1 - Omega_m)
    f = (Omega_m * (1 + z)**3 / E)**0.6
    H = cosmo.H(z).value
    k_unit = 2 * np.pi / boxlen
    return f * H / k_unit / (1 + z) / 300000
# Author = Z. Yasemin Kalender

# table and plot packages
from astropy.io import ascii
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
from astropy.io import fits

# cosmology packages
from astropy.cosmology import WMAP9 as cosmo
from astropy.cosmology import FlatwCDM, FlatLambdaCDM
import astropy.units as u
from astropy.cosmology import Planck13, z_at_value

##### Some Cosmological Constants..
H0 = cosmo.H(0)
cosmo = FlatwCDM(name='SNLS3+WMAP7', H0=71.58, Om0=0.262, w0=-1.016)  # Friedman 2015 paper, Table 9

path = '/Users/zeynepyaseminkalender/Documents/MyFiles_Pyhton/week5'

# obtaining the data needed
Friedman_data9 = Table.read('friedman_table9.fit')
print(Friedman_data9)

# redshift values
z_values = Friedman_data9["zCMB"]
from scipy.optimize import curve_fit
from scipy.misc import factorial
import numpy.random as rm
import scipy.stats as stats
from multiprocessing import Pool, Process, freeze_support
import multiprocessing
import argparse

G = ct.G
c = ct.c
Msun = 1.989e30
pi = ct.pi

noisecurvepts = 900

Hubble_Parameter = WMAP9.H(0.0).value  # km/sec/Mpc
h = Hubble_Parameter / 100.0
Omega_M = WMAP9.Om(0.0)
Omega_vac = 1.0 - Omega_M


#this is to cut out scale factors that occur before the first real mergers?????
def scaleadjustfunc(scale_factor):
    change = (scale_factor <= 1.0 / 138.0)
    keep = (scale_factor > 1.0 / 138.0)
    return scale_factor * keep + (1.0 / 138.0) * change


def Least_squared_binned_fit(counts):
    atest = np.linspace(0.001, 20.0, 10000)
    chisquared = np.zeros(len(atest))
plt.xlabel('v/c')
plt.ylabel('KE')
plt.grid(True)
plt.show()

### 3.3 Negatons as possible candidates for dark matter and dark energy

Gravitationally, dark matter (DM) behaves like baryonic matter, i.e. it has $P=+nm_{DM} v^2$. Therefore DM cannot consist dominantly of negatons. In any case, the flux of negatons required to significantly affect the masses and evolution of supermassive black holes, or even of large objects with large capture cross sections like red supergiants, is very large. For a billion-solar-mass black hole, the flux required to significantly alter its mass over the Hubble time ($t_H \approx 4.45\times10^{17}\,s$) is given as:

$f=\frac{M_{BH}}{m \times t_H \times A_{BH}} \approx 2.44\times10^{22}\,m^{-2} s^{-1} \quad (9)$

where $A_{BH} \approx 10^{26}\,m^2$ is the horizon area of the black hole, and the negaton mass $m$ is taken to be of the order of the proton mass. Similarly, the flux required to alter the evolution of red supergiants works out to $f \approx 10^{16}\,m^{-2} s^{-1}$. These fluxes are much higher than what the expected DM density of $\approx 2\times 10^{-27}\,kg\,m^{-3}$ could supply, so negative-mass particles cannot be a source of DM. In any case, their energy density being negative, they would not be suitable candidates.

m_bh = 1.e9 * M_sun
a_bh = (16 * math.pi * G**2 * m_bh**2) / c**4   # horizon area of the black hole
t_h = 1 / cosmo.H(0).decompose()                # Hubble time
flux = m_bh / (t_h * a_bh * m_p)

print("Hubble constant = {0:.2e}".format(cosmo.H(0)))
print("Hubble time = {0:.2e}".format(t_h))
print("flux = {0:.2e}".format(flux))

But dark energy (DE) can result from negatons through $P=-nmc^2$ (negative $\rho$ implying repulsive gravity), so the density of such particles giving rise to DE can be estimated as follows. Assuming a separation between the particles of a Compton length, i.e. $\hbar/mc$, which is also positive for negatons (since both $\hbar$ and $m$ are negative, and negatons also obey the quantum uncertainty principle), the density is given by $\frac{m}{(\hbar/mc)^3} =\frac{m^4 c^3}{\hbar^3}$, which is negative (since $m^4$ is positive and $\hbar^3$ is negative for negatons). Equating this to the observed DE density, $\rho_{DE}\approx 7\times 10^{-27}\,kg\,m^{-3}$, we can estimate the mass of these particles as:

$m=\left(\frac{\rho_{DE} \hbar^3}{c^3}\right)^{1/4}\approx 4\times 10^{-39}\,kg \quad (10)$

For these negative-mass particles to make up a dark-energy density of $\approx 7\times 10^{-27}\,kg\,m^{-3}$, the number density satisfies $n \times m \approx 7\times 10^{-27}\,kg\,m^{-3}$, which gives:

$n\approx 10^{12}\,m^{-3} \quad (11)$

At earlier epochs (high $z$), the matter (and DM) densities would have been higher; as the Universe expands, the negative pressure due to the negatons starts to dominate, accounting for the accelerated expansion of the Universe observed at the present epoch. A quick numerical check of Eqs. (10) and (11) follows below.
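A minimal numerical sketch of Eqs. (10) and (11), assuming only the dark-energy density quoted above and astropy's physical constants:

from astropy import units as u
from astropy import constants as const

rho_de = 7e-27 * u.kg / u.m**3                                    # observed DE density from the text
m_neg = ((rho_de * const.hbar**3 / const.c**3)**0.25).to(u.kg)    # Eq. (10)
n_neg = (rho_de / m_neg).to(u.m**-3)                              # Eq. (11)
print("negaton mass ~ {0:.1e}, number density ~ {1:.1e}".format(m_neg, n_neg))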
def set_value(x):
    LOG.info('Setting value...')
    return cosmo.H(0).value