def ew_teff_lyman(ilambda, zem, fN_model, NHI_MIN=11.5, NHI_MAX=22.0, N_eval=5000,
                  EW_spline=None, bval=24., fNz=False, cosmo=None, debug=False,
                  cumul=None, verbose=False):
    """ tau effective (follows ew_teff_lyman.pro from XIDL)
      teff = ew_teff_lyman(3400., 2.4, fN_model)

    Parameters
    ----------
    ilambda: float
      Observed wavelength [Ang]
    zem: float
      Emission redshift of the source [sets which Lyman lines are included]
    fN_model: fN_Model
      f(N) model used to evaluate the opacity
    bval: float
      Characteristic Doppler parameter for the Lya forest
      [Options: 24, 35 km/s]
    NHI_MIN: float
      Minimum log HI column for integration [default = 11.5]
    NHI_MAX: float
      Maximum log HI column for integration [default = 22.0]
    fNz: bool (False)
      Input f(N,z) instead of f(N,X)
    cosmo: astropy.cosmology (None)
      Cosmological model to adopt (as needed)
    cumul: list of cumulative sums
      Recorded only if cumul is not None

    Returns
    -------
    teff: float
      Total effective opacity of all lines contributing

    ToDo:
      1. Parallelize the Lyman loop

    JXP 07 Nov 2014
    """
    # Lambda
    if not isinstance(ilambda, float):
        raise ValueError('igm.tau_eff: ilambda must be a float for now')
    Lambda = ilambda
    if not isinstance(Lambda, u.quantity.Quantity):
        Lambda = Lambda * u.AA  # Ang

    # Read in EW spline (if needed)
    if EW_spline is None:
        if int(bval) == 24:
            EW_FIL = xa_path + '/igm/EW_SPLINE_b24.p'
        elif int(bval) == 35:
            EW_FIL = os.environ.get('XIDL_DIR') + '/IGM/EW_SPLINE_b35.fits'
        else:
            raise ValueError('igm.tau_eff: Not ready for this bvalue %g' % bval)
        EW_spline = pickle.load(open(EW_FIL, 'rb'))

    # Lines
    wrest = tau_eff_llist()

    # Find the lines contributing at this observed wavelength
    gd_Lyman = wrest[(Lambda / (1 + zem)) < wrest]
    nlyman = len(gd_Lyman)
    if nlyman == 0:
        if verbose:
            print('igm.tau_eff: No Lyman lines covered at this wavelength')
        return 0

    # N_HI grid
    lgNval = NHI_MIN + (NHI_MAX - NHI_MIN) * np.arange(N_eval) / (N_eval - 1)  # Base 10
    dlgN = lgNval[1] - lgNval[0]
    Nval = 10.**lgNval
    teff_lyman = np.zeros(nlyman)

    # For cumulative
    if cumul is not None:
        cumul.append(lgNval)

    # Loop on the lines (would be great to do this in parallel)
    for qq, line in enumerate(gd_Lyman):
        # Redshift
        zeval = ((Lambda / line) - 1).value
        if zeval < 0.:
            teff_lyman[qq] = 0.
            continue

        # Cosmology
        if fNz is False:
            if cosmo is None:
                cosmo = FlatLambdaCDM(H0=70, Om0=0.3)  # Vanilla
            # dxdz = (np.fabs(xigmu.cosm_xz(zeval-0.1, cosmo=cosmo) -
            #                 xigmu.cosm_xz(zeval+0.1, cosmo=cosmo)) / 0.2)
            dxdz = xigmu.cosm_xz(zeval, cosmo=cosmo, flg=1)
        else:
            dxdz = 1.  # Code is using f(N,z)

        # Get EW values (could pack these all together)
        idx = np.where(EW_spline['wrest'] == line)[0]
        if len(idx) != 1:
            raise ValueError('tau_eff: Line %g not included or over included?!' % line)
        restEW = interpolate.splev(lgNval, EW_spline['tck'][idx], der=0)

        # dz
        dz = ((restEW * u.AA) * (1 + zeval) / line).value

        # Evaluate f(N,X) at zeval
        log_fnX = fN_model.eval(lgNval, zeval).flatten()

        # Sum
        intgrnd = 10.**(log_fnX) * dxdz * dz * Nval
        teff_lyman[qq] = np.sum(intgrnd) * dlgN * np.log(10.)
        if cumul is not None:
            cumul.append(np.cumsum(intgrnd) * dlgN * np.log(10.))

        # Debug
        if debug:
            xdb.xplot(lgNval, np.log10(10.**(log_fnX) * dxdz * dz * Nval))
            xdb.set_trace()

    return np.sum(teff_lyman)
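# Illustrative usage (a sketch, not from the original source): build an f(N)
# model, e.g. the default model used in the test code below, then evaluate the
# effective Lyman-series opacity at an observed wavelength of 3400 Ang for a
# zem=2.4 source:
#
#   fN_model = xifm.default_model()
#   teff = ew_teff_lyman(3400., 2.4, fN_model)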
def eval(self, NHI, z, vel_array=None, cosmo=None):
    """ Evaluate the f(N,X) model at a set of NHI values

    Parameters
    ----------
    NHI: float or array
      NHI values
    z: float or array
      Redshift for evaluation
    vel_array: array
      Velocities relative to z

    Returns
    -------
    log_fNX: float, array, or 2D array
      f(NHI,X)[z] values
      Float if given one NHI,z value each, otherwise a 2D array
      with [NHI,z] on the axes

    JXP 07 Nov 2014
    """
    # Imports
    from astropy import constants as const

    # Tuple?  (all values packed into the NHI parameter)
    if isinstance(NHI, tuple):
        z = NHI[1]
        NHI = NHI[0]
        flg_1D = 1
    else:  # NHI and z separate
        flg_1D = 0

    # NHI
    if isiterable(NHI):
        NHI = np.array(NHI)  # Insist on array
    else:
        NHI = np.array([NHI])
    lenNHI = len(NHI)

    # Redshift
    if vel_array is not None:
        z_val = z + (1 + z) * vel_array / (const.c.cgs.value / 1e5)
    else:
        z_val = z
    if isiterable(z_val):
        z_val = np.array(z_val)
    else:
        z_val = np.array([z_val])
    lenz = len(z_val)

    # Check on zmnx
    bad = np.where((z_val < self.zmnx[0]) | (z_val > self.zmnx[1]))[0]
    if len(bad) > 0:
        raise ValueError(
            'fN.model.eval: z={:g} not within self.zmnx={:g},{:g}'.format(
                z_val[bad[0]], *self.zmnx))

    if self.fN_mtype == 'Hspline':
        # Evaluate the spline without z dependence
        log_fNX = self.model.__call__(NHI)

        if (not isiterable(z_val)) | (flg_1D == 1):  # scalar or 1D array wanted
            log_fNX += self.gamma * np.log10((1 + z_val) / (1 + self.zpivot))
        else:
            # Matrix algebra to speed things up
            lgNHI_grid = np.outer(log_fNX, np.ones(len(z_val)))
            lenfX = len(log_fNX)
            # (1+z)^gamma
            z_grid1 = 10**(np.outer(np.ones(lenfX) * self.gamma,
                                    np.log10(1 + z_val)))
            z_grid2 = np.outer(np.ones(lenfX) * ((1. / (1 + self.zpivot))**self.gamma),
                               np.ones(len(z_val)))
            log_fNX = lgNHI_grid + np.log10(z_grid1 * z_grid2)
    elif self.fN_mtype == 'Gamma':  # Gamma function (e.g. Inoue+14)
        # Setup the parameters
        Nl, Nu, Nc, bval = self.param[0]

        # gNHI
        Bi = self.param[1]
        ncomp = len(Bi)
        log_gN = np.zeros((lenNHI, ncomp))
        beta = [item[1] for item in self.param[2:]]
        for kk in range(ncomp):
            log_gN[:, kk] += (np.log10(Bi[kk]) + NHI * (-1 * beta[kk]) +
                              (-1. * 10.**(NHI - Nc) / np.log(10)))  # log10[ exp(-NHI/Nc) ]

        # f(z)
        fz = np.zeros((lenz, 2))
        # Loop on the components (LyaF, DLA)
        for kk in range(ncomp):
            if kk == 0:  # LyaF
                zcuts = self.param[2][2:4]
                gamma = self.param[2][4:]
            else:  # DLA
                zcuts = [self.param[3][2]]
                gamma = self.param[3][3:]
            zcuts = [0] + list(zcuts) + [999.]
            Aval = self.param[2 + kk][0]

            # Cut on z
            for ii in range(1, len(zcuts)):
                izcut = np.where((z_val < zcuts[ii]) & (z_val > zcuts[ii - 1]))[0]
                liz = len(izcut)
                # Evaluate (at last!)
                if (ii <= 2) & (liz > 0):
                    fz[izcut, kk] = Aval * ((1 + z_val[izcut]) /
                                            (1 + zcuts[1]))**gamma[ii - 1]
                elif (ii == 3) & (liz > 0):
                    fz[izcut, kk] = Aval * (((1 + zcuts[2]) / (1 + zcuts[1]))**gamma[ii - 2] *
                                            ((1 + z_val[izcut]) / (1 + zcuts[2]))**gamma[ii - 1])

        # dX/dz
        dXdz = igmu.cosm_xz(z_val, cosmo=cosmo, flg=1)

        # Final steps
        if flg_1D == 1:
            fnX = np.sum(fz * 10.**log_gN, 1) / dXdz
            log_fNX = np.log10(fnX)
        else:
            # Generate the matrix
            fnz = np.zeros((lenNHI, lenz))
            for kk in range(ncomp):
                fnz += np.outer(10.**log_gN[:, kk], fz[:, kk])
            # Finish up
            log_fNX = np.log10(fnz) - np.log10(np.outer(np.ones(lenNHI), dXdz))
    else:
        raise ValueError('fN.model: Not ready for this model type {:s}'.format(
            self.fN_mtype))

    # Return
    if (lenNHI + lenz) == 2:
        return log_fNX.flatten()[0]  # scalar
    else:
        return log_fNX
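# Illustrative usage of eval() (a sketch; fN_model is an instance of this class):
#
#   log_fNX = fN_model.eval(14.0, 2.5)               # scalar NHI and z -> float
#   log_fNX = fN_model.eval([12., 14., 17.], 2.5)    # -> [NHI, z] array
#   # Packed-tuple form (flg_1D): paired NHI and z values of equal length;
#   # the second positional argument is then ignored
#   log_fNX = fN_model.eval(([12., 14.], [2.5, 2.5]), 0.)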
def teff_ll(self, z912, zem, N_eval=5000, cosmo=None):
    """ Calculate teff_LL
    Effective opacity from LL absorption at z912 from zem

    Parameters
    ----------
    z912: float
      Redshift for evaluation
    zem: float
      Redshift of source
    cosmo: astropy.cosmology (None)
      Cosmological model to adopt (as needed)
    N_eval: int (5000)
      Discretization parameter

    Returns
    -------
    zval, teff_LL: arrays
      z values and effective opacity from LL absorption from z912 to zem

    JXP 10 Nov 2014
    """
    # Imports
    from astropy import constants as const

    # NHI array (base 10; max at 22)
    lgNval = 11.5 + 10.5 * np.arange(N_eval) / (N_eval - 1.)
    dlgN = lgNval[1] - lgNval[0]
    Nval = 10.**lgNval

    # z array
    zval = z912 + (zem - z912) * np.arange(N_eval) / (N_eval - 1.)
    dz = np.fabs(zval[1] - zval[0])

    teff_LL = np.zeros(N_eval)

    # dXdz
    # (the IDL version allowed an FNZ flag that set dXdz = 1)
    dXdz = igmu.cosm_xz(zval, cosmo=cosmo, flg=1)

    # Evaluate f(N,X)
    velo = (zval - zem) / (1 + zem) * (const.c.cgs.value / 1e5)  # Kludge for eval [km/s]
    log_fnX = self.eval(lgNval, zem, vel_array=velo)
    log_fnz = log_fnX + np.outer(np.ones(N_eval), np.log10(dXdz))

    # Evaluate tau(z,N)
    teff_engy = (const.Ryd.to(u.eV, equivalencies=u.spectral()) /
                 ((1 + zval) / (1 + zem)))
    sigma_z = xai.photo_cross(1, 1, teff_engy)
    # sigma_z = teff_cross * ((1+zval)/(1+zem))**(2.75)  # Not exact but close
    tau_zN = np.outer(Nval, sigma_z)

    # Integrand
    intg = 10.**(log_fnz) * (1. - np.exp(-1. * tau_zN))

    # Sum in N first
    N_summed = np.sum(intg * np.outer(Nval, np.ones(N_eval)), 0) * dlgN * np.log(10.)
    # Then cumulatively sum in z, so teff_LL[i] integrates from zval[i] up to zem
    teff_LL = (np.cumsum(N_summed[::-1]))[::-1] * dz

    # Return
    return zval, teff_LL
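# Illustrative usage (sketch): integrate the LL opacity from z912=2.0 up to a
# zem=2.44 source and plot it, as in the test code below:
#
#   zval, teff_LL = fN_model.teff_ll(2.0, 2.44)
#   xdb.xplot(zval, teff_LL)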
# Plot the result of the teff_ll test (zval, teff_LL from fN_model.teff_ll)
xdb.xplot(zval, teff_LL)  # , xlabel='z', ylabel=r'$\tau_{\rm LL}$')

# Check MFP
if (flg_test % 64) >= 32:
    fN_model = xifm.default_model()
    z = 2.44
    mfp = fN_model.mfp(z)
    print('MFP at z={:g} is {:g} Mpc'.format(z, mfp.value))

# Check Inoue+14
if (flg_test % 2**7) >= 2**6:
    print('Testing Akio Model')
    fN_model = fN_Model('Gamma')
    NHI = [12., 14., 17., 21.]
    z = 2.5
    dXdz = igmu.cosm_xz(z, flg=1)
    # Reference values from Akio (Inoue+14):
    #   NHI=12 -> 1.2e-9
    #   NHI=14 -> 4.9e-13
    #   NHI=17 -> 4.6e-18
    #   NHI=21 -> 6.7e-23

    # Test 1D (packed tuple of paired NHI, z values)
    tstNz = (NHI, [z for ii in enumerate(NHI)])
    log_fNX = fN_model.eval(tstNz, 0.)
    for kk, iNHI in enumerate(NHI):
        print('I+14 At z={:g} and NHI={:g}, f(N,z) = {:g}'.format(
            z, iNHI, 10.**log_fNX[kk] * dXdz))

    # Test matrix
    log_fNX = fN_model.eval(NHI, z)