def compute_recon_power_spectrum(fishcast, z, b=-1., b2=-1., bs=-1., N=None):
    '''
    Returns the reconstructed power spectrum, following Stephen's paper.
    '''
    if b == -1.:
        b = compute_b(fishcast, z)
    if b2 == -1:
        b2 = 8 * (b - 1) / 21
    if bs == -1:
        bs = -2 * (b - 1) / 7
    noise = 1 / compute_n(fishcast, z)
    if fishcast.experiment.HI:
        noise = castorinaPn(z)
    if N is None:
        N = 1 / compute_n(fishcast, z)
    f = fishcast.cosmo.scale_independent_growth_factor_f(z)
    bL1 = b - 1.
    bL2 = b2 - 8 * (b - 1) / 21
    bLs = bs + 2 * (b - 1) / 7
    K, MU = fishcast.k, fishcast.mu
    h = fishcast.params['h']
    klin = np.logspace(np.log10(min(K)), np.log10(max(K)), fishcast.Nk)
    mulin = MU.reshape((fishcast.Nk, fishcast.Nmu))[0, :]
    plin = np.array([fishcast.cosmo.pk_cb_lin(k * h, z) * h**3. for k in klin])
    zelda = Zeldovich_Recon(klin, plin, R=15, N=2000, jn=5)
    kSparse, p0ktable, p2ktable, p4ktable = zelda.make_pltable(
        f, ngauss=3, kmin=min(K), kmax=max(K), nk=200, method='RecSym')
    bias_factors = np.array([1, bL1, bL1**2, bL2, bL1*bL2, bL2**2,
                             bLs, bL1*bLs, bL2*bLs, bLs**2, 0, 0, 0])
    p0Sparse = np.sum(p0ktable * bias_factors, axis=1)
    p2Sparse = np.sum(p2ktable * bias_factors, axis=1)
    p4Sparse = np.sum(p4ktable * bias_factors, axis=1)
    p0 = Spline(kSparse, p0Sparse)(klin)
    p2 = Spline(kSparse, p2Sparse)(klin)
    p4 = Spline(kSparse, p4Sparse)(klin)
    l0, l2, l4 = legendre(0), legendre(2), legendre(4)
    Pk = lambda mu: p0 * l0(mu) + p2 * l2(mu) + p4 * l4(mu)
    result = np.array([Pk(mu) for mu in mulin]).T
    return result.flatten() + N
def read_from_h5(file_name, **kwargs):
    """Read data from an H5 file in LVC format"""
    import re
    import h5py
    from scipy.interpolate import InterpolatedUnivariateSpline as Spline

    phase_re = re.compile('phase_l(?P<ell>.*)_m(?P<m>.*)')
    amp_re = re.compile('amp_l(?P<ell>.*)_m(?P<m>.*)')
    with h5py.File(file_name, 'r') as f:
        t = f['NRtimes'][:]
        ell_m = np.array([[int(match['ell']), int(match['m'])]
                          for key in f
                          for match in [phase_re.match(key)] if match])
        ell_min = np.min(ell_m[:, 0])
        ell_max = np.max(ell_m[:, 0])
        data = np.empty((t.size, sf.LM_total_size(ell_min, ell_max)), dtype=complex)
        for ell in range(ell_min, ell_max + 1):
            for m in range(-ell, ell + 1):
                amp = Spline(f['amp_l{0}_m{1}/X'.format(ell, m)][:],
                             f['amp_l{0}_m{1}/Y'.format(ell, m)][:],
                             k=int(f['amp_l{0}_m{1}/deg'.format(ell, m)][()]))(t)
                phase = Spline(f['phase_l{0}_m{1}/X'.format(ell, m)][:],
                               f['phase_l{0}_m{1}/Y'.format(ell, m)][:],
                               k=int(f['phase_l{0}_m{1}/deg'.format(ell, m)][()]))(t)
                data[:, sf.LM_index(ell, m, ell_min)] = amp * np.exp(1j * phase)
        if 'auxiliary-info' in f and 'history.txt' in f['auxiliary-info']:
            history = ("### " + f['auxiliary-info/history.txt'][()].decode()
                       .replace('\n', '\n### ')).split('\n')
        else:
            history = [""]
    constructor_statement = "scri.LVC.read_from_h5('{0}')".format(file_name)
    w = WaveformModes(t=t, data=data, ell_min=ell_min, ell_max=ell_max,
                      frameType=Inertial, dataType=h, history=history,
                      constructor_statement=constructor_statement,
                      r_is_scaled_out=True, m_is_scaled_out=True)
    return w
def __init__(self):
    super(RBBTable, self).__init__()
    self.df = pd.read_hdf(
        os.path.join(fu.codepath, 'data', 't_to_norm_rad.hdf'), 'df')
    self.table_temps = self.df.index.values.astype('float')
    self.t2rad = {}
    self.rad2t = {}
    # The radiances for abs(T) < 3 K are 0 for channels 3-5, which means that
    # during the backward lookup from radiance to T the 0 radiance cannot be
    # looked up functionally (it is a relation, not a function). This makes the
    # Spline interpolator ignore the negative part, which I cannot afford.
    # The work-around is to interpolate from T = -3 to 3 (where the radiances
    # are impossibly close to 0 anyway for channels 3-5), ignoring all 0 values
    # for T in [-2..2].
    sliced = self.df.loc[abs(self.df.index) > 2]  # .ix is deprecated; use .loc
    for ch in range(3, 10):
        # store the Spline interpolators in a dictionary, 1 per channel
        self.t2rad[ch] = Spline(self.table_temps, self.df[ch], s=0.0, k=1)
        # for channels 3-5, take the data without the values around 0:
        if ch < 6:
            data = sliced[ch]
            temps = sliced.index.values.astype('float')
        else:
            data = self.df[ch]
            temps = self.table_temps
        # store the Spline interpolators in a dictionary, 1 per channel
        self.rad2t[ch] = Spline(data, temps, s=0.0, k=1)
def _setup_chi(self):
    chival = self.provider.get_comoving_radial_distance(self.zarray)
    zatchi = Spline(chival, self.zarray)
    chiatz = Spline(self.zarray, chival)
    chimin = np.min(chival) + 1.e-5
    chimax = np.max(chival)
    chival = np.linspace(chimin, chimax, self.Nchi)
    zval = zatchi(chival)
    chistar = \
        self.provider.get_comoving_radial_distance(self.provider.get_param('zstar'))
    chivalp = \
        np.array(list(map(lambda x: np.linspace(x, chistar, self.Nchi_mag), chival)))
    chivalp = chivalp.transpose()[0]
    zvalp = zatchi(chivalp)
    chi_result = {'zatchi': zatchi, 'chiatz': chiatz, 'chival': chival,
                  'zval': zval, 'chivalp': chivalp, 'zvalp': zvalp}
    return chi_result
def iteration_step(den):
    """Performs one iteration of the Kohn-Sham cycle (see DFT cheat sheet)."""
    # -- Step 1: build effective Kohn-Sham potential --
    # determine w(r) via Poisson's equation for initial or mixed density
    w, r = solve_poisson(den)
    # find hom. sol. w_hom(r) = a*r to match BC w(r_max) = q_tot = z
    beta = (z - w[-1]) / r[-1]
    w += beta * r
    print("[INFO] using w_hom(r) with beta = {:.4f}".format(beta))
    # interpolate the Hartree potential directly, since we don't need w(r)
    # anymore, and include a Hartree-Fock-like exchange of -1/2 * Hartree potential
    vh = Spline(r, 0.5 * w / r)

    def veff(r):
        """Effective Kohn-Sham potential --> compare with DFT cheat sheet."""
        vext = -z / r
        return vh(r) + vext

    # -- Step 2: solve single-particle Schroedinger-like equation --
    # integration returns (u(r_i), r_i) --> u(r_0) = solver(E)[0][0]
    en = newton(lambda en0: solve_rseq(en0, veff)[0][0], -2.0, maxiter=50)
    # integrate again using the correct energy
    u, r = solve_rseq(en, veff)
    # normalize u^2 to 1 regardless of z-value since psi(r) = Y_00 * u(r) / r
    norm = trapz(u ** 2, r)
    u /= np.sqrt(norm)
    print("[INFO] normalizing |u(r)|^2 from {:.3g} to 1".format(norm))
    # -- Step 3: construct and interpolate new density --
    den_new = Spline(r, z * u ** 2)
    # -- Step 4: compute total energy --
    etot = energy_functional(den_new, vh, en)
    return etot, den_new
def calc_sig_dist_spline(x, y, xref, yref, n_iter=5, sig_clip=3, plot=False,
                         smooth_fac=10000, wx=None, wy=None, ret_bool=False):
    # NOTE: wx and wy must be provided despite the None defaults;
    # they are indexed below.
    # now sigma clip
    sbool = np.ones(x.shape, dtype='bool')
    for i in range(n_iter):
        tx = Spline(x[sbool], y[sbool], xref[sbool] - x[sbool],
                    s=smooth_fac, w=wx[sbool])
        ty = Spline(x[sbool], y[sbool], yref[sbool] - y[sbool],
                    s=smooth_fac, w=wy[sbool])
        xout = tx.ev(x, y)
        yout = ty.ev(x, y)
        dx = xref - xout - x
        dy = yref - yout - y
        xcen = np.mean(dx[sbool])
        xsig = np.std(dx[sbool])
        ycen = np.mean(dy[sbool])
        ysig = np.std(dy[sbool])
        sbool_temp = ((dx > xcen - sig_clip * xsig) * (dx < xcen + sig_clip * xsig) *
                      (dy > ycen - sig_clip * ysig) * (dy < ycen + sig_clip * ysig))
        if i != n_iter - 1:
            sbool = sbool_temp * sbool
            print('trimmed', len(sbool) - np.sum(sbool), 'stars')

    if plot:
        print('number of residuals outside of -5,5',
              np.sum((dx > 5) + (dx < -5) + (dy < -5) + (dy > 5)))
        plt.figure(35)
        plt.subplot(121)
        plt.hist(dx, bins=100, range=(-5, 5))
        plt.title('X residual to fit')
        plt.subplot(122)
        plt.hist(dy, bins=100, range=(-5, 5))
        plt.title('Y residual to fit')
        plt.show()

    if not ret_bool:
        return tx, ty, dx, dy
    else:
        return tx, ty, dx, dy, sbool
def make_bao_plot(fname):
    """Does the work of making the BAO figure."""
    zlist = [2.0, 3.0, 4.0, 5.0, 6.0]
    clist = ['b', 'c', 'g', 'm', 'r']
    # Now make the figure.
    fig, ax = plt.subplots(1, 2, figsize=(6, 3.0), sharey=True)
    ii, jj = 0, 0
    for zz, col in zip(zlist, clist):
        # Read the data from file.
        aa = 1.0 / (1.0 + zz)
        pkd = np.loadtxt(dpath + "HI_bias_{:06.4f}.txt".format(aa))[1:, :]
        # Now read linear theory and put it on the same grid.
        lin = np.loadtxt("../../data/pklin_{:06.4f}.txt".format(aa))
        dk = pkd[1, 0] - pkd[0, 0]
        kk = np.linspace(pkd[0, 0] - dk / 2, pkd[-1, 0] + dk / 2, 5000)
        tmp = np.interp(kk, lin[:, 0], lin[:, 1])
        lin = np.zeros_like(pkd)
        for i in range(pkd.shape[0]):
            lin[i, 0] = pkd[i, 0]
            ww = np.nonzero((kk > pkd[i, 0] - dk / 2) & (kk < pkd[i, 0] + dk / 2))
            lin[i, 1] = np.sum(kk[ww]**2 * tmp[ww]) / np.sum(kk[ww]**2)
        # Take out the broad band.
        if False:  # Use smoothing spline as broad-band/no-wiggle.
            knots = np.arange(0.05, 0.5, 0.05)
            ss = Spline(pkd[:, 0], pkd[:, 1], t=knots)
            rat = pkd[:, 1] / ss(pkd[:, 0])
        else:  # Use Savitzky-Golay filter for no-wiggle.
            ss = savgol_filter(pkd[:, 1], 7, polyorder=2)
            rat = pkd[:, 1] / ss
        ax[ii].plot(pkd[:, 0], rat + 0.2 * (jj // 2), col + '-',
                    label="$z={:.1f}$".format(zz))
        if False:  # Use smoothing spline as broad-band/no-wiggle.
            ss = Spline(pkd[:, 0], lin[:, 1], t=knots)
            rat = lin[:, 1] / ss(pkd[:, 0])
        else:  # Use Savitzky-Golay filter for no-wiggle.
            ss = savgol_filter(lin[:, 1], 7, polyorder=2)
            rat = lin[:, 1] / ss
        ax[ii].plot(pkd[:, 0], rat + 0.2 * (jj // 2), col + ':')
        ii = (ii + 1) % 2
        jj = jj + 1
    # Tidy up the plot.
    for ii in range(ax.size):
        ax[ii].legend(ncol=2, framealpha=0.5)
        ax[ii].set_xlim(0.05, 0.4)
        ax[ii].set_ylim(0.75, 1.5)
        ax[ii].set_xscale('linear')
        ax[ii].set_yscale('linear')
    # Put on some more labels.
    ax[0].set_xlabel(r'$k\quad [h\,{\rm Mpc}^{-1}]$')
    ax[1].set_xlabel(r'$k\quad [h\,{\rm Mpc}^{-1}]$')
    ax[0].set_ylabel(r'$P(k)/P_{\rm nw}(k)$+offset')
    # and finish up.
    plt.tight_layout()
    plt.savefig(fname)
def make_calib_plot():
    """Does the work of making the calibration figure."""
    # Now make the figure.
    fig, ax = plt.subplots(1, 2, figsize=(6, 2.5))
    # The left hand panel is DLA bias vs. redshift.
    bDLA = np.loadtxt("boss_bDLA.txt")
    ax[0].errorbar(bDLA[:, 0], bDLA[:, 1], yerr=bDLA[:, 2], fmt='o')
    ax[0].fill_between([1.5, 3.5], [1.99 - 0.11, 1.99 - 0.11],
                       [1.99 + 0.11, 1.99 + 0.11],
                       color='lightgrey', alpha=0.5)
    # The N-body results.
    bb = np.loadtxt("HI_bias_vs_z_fid.txt")
    ax[0].plot(bb[:, 0], bb[:, 2], 'md')
    ss = Spline(bb[::-1, 0], bb[::-1, 2])
    ax[0].plot(np.linspace(1.5, 3.5, 100), ss(np.linspace(1.5, 3.5, 100)), 'm--')
    #
    ax[0].set_xlabel(r'$z$')
    ax[0].set_ylabel(r'$b_{DLA}(z)$')
    # Tidy up.
    ax[0].set_xlim(1.95, 3.25)
    ax[0].set_ylim(1, 3)
    ax[0].set_xscale('linear')
    ax[0].set_yscale('linear')
    # The right hand panel is OmegaHI vs. z.
    # Read in the data and convert to "normal" OmegaHI convention.
    dd = np.loadtxt("omega_HI_obs.txt")
    Ez = np.sqrt(0.3 * (1 + dd[:, 0])**3 + 0.7)
    ax[1].errorbar(dd[:, 0], 1e-3 * dd[:, 1] / Ez**2, yerr=1e-3 * dd[:, 2] / Ez**2,
                   fmt='s', mfc='None')
    # Plot the fit line.
    zz = np.linspace(0, 7, 100)
    Ez = np.sqrt(0.3 * (1 + zz)**3 + 0.7)
    ax[1].plot(zz, 4e-4 * (1 + zz)**0.6 / Ez**2, 'k-')
    # Now plot the simulation points.
    dd = np.loadtxt("omega_HI_sim.txt")
    # dd[:,1] *= 3e5*(1+(3.5/dd[:,0])**6) / 2e9
    ax[1].plot(dd[:, 0], dd[:, 1], 'md')
    ss = Spline(dd[:, 0], dd[:, 1])
    ax[1].plot(np.linspace(2, 6, 100), ss(np.linspace(2, 6, 100)), 'm--')
    # Tidy up the plot.
    ax[1].set_xlim(1, 6.25)
    ax[1].set_ylim(4e-6, 3e-4)
    ax[1].set_xscale('linear')
    ax[1].set_yscale('log')
    # Put on some more labels.
    ax[1].set_xlabel(r'$z$')
    ax[1].set_ylabel(r'$\Omega_{HI}$')
    # and finish up.
    plt.tight_layout()
    plt.savefig('calib.pdf')
def interpolate(self, fdata):
    """Interpolate from computed data to required data.

    Parameters
    ----------
    fdata : ndarray
        Frequency-domain data corresponding to ``freq_compute``.

    Returns
    -------
    full_data : ndarray
        Frequency-domain data corresponding to ``freq_required``.

    """
    # Pre-allocate result.
    out = np.zeros(self.freq_required.size, dtype=np.complex128)

    # 1. Interpolate between fmin and fmax.
    # If freq_coarse is not exactly freq_required, we use a cubic spline to
    # interpolate from fmin to fmax.
    if self.freq_coarse.size != self.freq_required.size:
        int_real = Spline(np.log(self.freq_compute),
                          fdata.real)(np.log(self.freq_interpolate))
        int_imag = Spline(np.log(self.freq_compute),
                          fdata.imag)(np.log(self.freq_interpolate))
        out[self.ifreq_interpolate] = int_real + 1j * int_imag
    # If they are the same, just fill in the data.
    else:
        out[self.ifreq_interpolate] = fdata

    # 2. Extrapolate from freq_required.min to fmin using PCHIP.
    # 2.a Extend freq_required/data by adding a point at 1e-100 Hz with
    #     - the same real part as the lowest computed frequency, and
    #     - zero imaginary part.
    freq_ext = np.r_[1e-100, self.freq_compute]
    data_ext = np.r_[fdata[0].real - 1e-100j, fdata]

    # 2.b Actual 'extrapolation' (now an interpolation).
    ext_real = Pchip(freq_ext, data_ext.real)(self.freq_extrapolate)
    ext_imag = Pchip(freq_ext, data_ext.imag)(self.freq_extrapolate)
    out[self.ifreq_extrapolate] = ext_real + 1j * ext_imag

    return out
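# A self-contained sketch (separate from the class above) of the PCHIP
# low-frequency extrapolation trick: anchoring the data at an effectively-zero
# frequency makes the monotone interpolant flatten the real part and drive the
# imaginary part to zero. `Pchip` is assumed to be scipy's PchipInterpolator,
# and the response function here is made up for illustration.
import numpy as np
from scipy.interpolate import PchipInterpolator as Pchip

freq_compute = np.logspace(0, 4, 30)             # computed frequencies [Hz]
fdata = 1.0 / (1 + 1j * freq_compute / 100.0)    # toy frequency response
freq_extrapolate = np.logspace(-3, -0.1, 10)     # below the computed range

freq_ext = np.r_[1e-100, freq_compute]
data_ext = np.r_[fdata[0].real - 1e-100j, fdata]
ext = (Pchip(freq_ext, data_ext.real)(freq_extrapolate)
       + 1j * Pchip(freq_ext, data_ext.imag)(freq_extrapolate))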
def read_from_h5(file_name, **kwargs):
    """Read data from an H5 file in LVC format"""
    import re
    import h5py
    from scipy.interpolate import InterpolatedUnivariateSpline as Spline

    phase_re = re.compile("phase_l(?P<ell>.*)_m(?P<m>.*)")
    amp_re = re.compile("amp_l(?P<ell>.*)_m(?P<m>.*)")
    with h5py.File(file_name, "r") as f:
        t = f["NRtimes"][:]
        ell_m = np.array([[int(match["ell"]), int(match["m"])]
                          for key in f
                          for match in [phase_re.match(key)] if match])
        ell_min = np.min(ell_m[:, 0])
        ell_max = np.max(ell_m[:, 0])
        data = np.empty((t.size, sf.LM_total_size(ell_min, ell_max)), dtype=complex)
        for ell in range(ell_min, ell_max + 1):
            for m in range(-ell, ell + 1):
                amp = Spline(f[f"amp_l{ell}_m{m}/X"][:],
                             f[f"amp_l{ell}_m{m}/Y"][:],
                             k=int(f[f"amp_l{ell}_m{m}/deg"][()]))(t)
                phase = Spline(f[f"phase_l{ell}_m{m}/X"][:],
                               f[f"phase_l{ell}_m{m}/Y"][:],
                               k=int(f[f"phase_l{ell}_m{m}/deg"][()]))(t)
                data[:, sf.LM_index(ell, m, ell_min)] = amp * np.exp(1j * phase)
        if "auxiliary-info" in f and "history.txt" in f["auxiliary-info"]:
            history = ("### " + f["auxiliary-info/history.txt"][()].decode()
                       .replace("\n", "\n### ")).split("\n")
        else:
            history = [""]
    constructor_statement = f"scri.LVC.read_from_h5('{file_name}')"
    w = WaveformModes(
        t=t,
        data=data,
        ell_min=ell_min,
        ell_max=ell_max,
        frameType=Inertial,
        dataType=h,
        history=history,
        constructor_statement=constructor_statement,
        r_is_scaled_out=True,
        m_is_scaled_out=True,
    )
    return w
def make_bao_plot():
    """Does the work of making the BAO figure."""
    zlist = [2.0, 3.0, 4.0, 5.0, 6.0]
    clist = ['b', 'c', 'g', 'm', 'r']
    # Now make the figure.
    fig, ax = plt.subplots(1, 2, figsize=(6, 3.0), sharey=True)
    ii, jj = 0, 0
    for zz, col in zip(zlist, clist):
        # Read the data from file.
        aa = 1.0 / (1.0 + zz)
        pkd = np.loadtxt("HI_pks_1d_{:06.4f}.txt".format(aa))
        # Now read linear theory and put it on the same grid -- currently
        # not accounting for finite bin width.
        lin = np.loadtxt("pklin_{:06.4f}.txt".format(aa))
        lin = np.interp(pkd[:, 0], lin[:, 0], lin[:, 1])
        # Take out the broad band.
        if False:  # Use smoothing spline as broad-band/no-wiggle.
            knots = np.arange(0.05, 0.5, 0.05)
            ss = Spline(pkd[:, 0], pkd[:, 1], t=knots)
            rat = pkd[:, 1] / ss(pkd[:, 0])
        else:  # Use Savitzky-Golay filter for no-wiggle.
            ss = savgol_filter(pkd[:, 1], 7, polyorder=2)
            rat = pkd[:, 1] / ss
        ax[ii].plot(pkd[:, 0], rat + 0.2 * (jj // 2), col + '-',
                    label="$z={:.1f}$".format(zz))
        if False:  # Use smoothing spline as broad-band/no-wiggle.
            ss = Spline(pkd[:, 0], lin, t=knots)
            rat = lin / ss(pkd[:, 0])
        else:  # Use Savitzky-Golay filter for no-wiggle.
            ss = savgol_filter(lin, 7, polyorder=2)
            rat = lin / ss
        ax[ii].plot(pkd[:, 0], rat + 0.2 * (jj // 2), col + ':')
        ii = (ii + 1) % 2
        jj = jj + 1
    # Tidy up the plot.
    for ii in range(ax.size):
        ax[ii].legend(ncol=2, framealpha=0.5)
        ax[ii].set_xlim(0.05, 0.4)
        ax[ii].set_ylim(0.75, 1.5)
        ax[ii].set_xscale('linear')
        ax[ii].set_yscale('linear')
    # Put on some more labels.
    ax[0].set_xlabel(r'$k\quad [h\,{\rm Mpc}^{-1}]$')
    ax[1].set_xlabel(r'$k\quad [h\,{\rm Mpc}^{-1}]$')
    ax[0].set_ylabel(r'$P(k)/P_{\rm nw}(k)$+offset')
    # and finish up.
    plt.tight_layout()
    plt.savefig('HI_bao.pdf')
def smooth_function_from_coordinates(
        x, y,                 # coordinates of some curve
        sample_fraction=1.0,  # fraction of x,y points used for resampling
        spline_smoothing=4,   # degree of spline (1 gives piecewise linear)
        ):
    """
    Given a set of coordinates in the `x` and `y` arrays, create a smooth
    function from these coordinates by

    1) resampling n uniformly distributed points by linear interpolation
       of the `x` and `y` coordinates, where n is given by
       `sample_fraction` times the length of `x`; and
    2) interpolating the resampled points by a smooth spline, where
       `spline_smoothing` is an integer holding the degree of the
       piecewise polynomial pieces of the spline (0 and 1 give a
       piecewise linear function, 2 and higher give splines of that
       order).

    Return the smooth function as a Python function of x, together with
    the (uniformly distributed) resampled points on which the smooth
    function is based.
    """
    # Construct linear interpolator of data points
    from Scientific.Functions.Interpolation \
        import InterpolatingFunction
    linear = InterpolatingFunction([x], y)
    # Resample (np.linspace requires an integer number of points)
    xp = np.linspace(x[0], x[-1], int(sample_fraction * len(x)))
    yp = np.array([linear(xi) for xi in xp])
    # Spline smoothing or linear interpolation, based on (xp,yp)
    if spline_smoothing >= 2:
        from scipy.interpolate import UnivariateSpline as Spline
        function = Spline(xp, yp, s=0, k=spline_smoothing)
    else:
        function = InterpolatingFunction([xp], yp)
    return function, xp, yp
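# A minimal usage sketch for smooth_function_from_coordinates, assuming the
# legacy ScientificPython package (Scientific.Functions.Interpolation) is
# installed; the noisy sine data are made up for illustration.
import numpy as np

x = np.linspace(0, 2 * np.pi, 200)
y = np.sin(x) + 0.05 * np.random.randn(x.size)

f, xp, yp = smooth_function_from_coordinates(x, y, sample_fraction=0.5,
                                             spline_smoothing=3)
print(f(np.pi / 2))   # close to sin(pi/2) = 1, up to noise and resampling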
def main():
    print("\n/---------------------------------------------------\\")
    print("| Ass.5.2: Self-consistent loop for the helium atom |")
    print("\\---------------------------------------------------/\n")
    print("[INIT] parameter settings for this run:")
    print("    electrons:       {}".format(z))
    print("    step size h:     {}".format(h))
    print("    minimum r:       {}".format(tmin))
    print("    maximum r:       {}".format(tmax))
    print("    grid pts:        {}".format(nsteps))
    print("    dense grid pts:  {}".format(nsteps_dense))
    print("    max. iterations: {}".format(itermax))
    print("    convergence dE:  {}".format(etol))
    print("    mixing alpha:    {}".format(amix))
    print("    initial density: {}".format(init_density))

    # -- Step 0: initialize density with exact H-atom-like density --
    if init_density == "H-atom":
        den_init = lambda r: z * z ** 3 * r * np.exp(-2 * z * r)  # noqa
    # -- alternatively initialize density with random numbers --
    elif init_density == "random":
        den_init = lambda r: random.randrange(0, 2)  # noqa
    else:
        raise ValueError("unknown initial density setting")

    # init loop variables
    den_mix = den_old = den_init
    etot_old = 0
    iterstep = 0

    # Kohn-Sham cycle: repeat until convergence is achieved
    while True:
        iterstep += 1
        print("\n\n[ITER] starting {}. iteration step".format(iterstep))
        # -- Steps 1 to 4: solve Kohn-Sham equations and find total energy --
        etot_new, den_new = iteration_step(den_mix)
        ediff = abs(etot_new - etot_old)
        print("[INFO] energy difference dE = {:.3e} Ha".format(ediff))
        # -- Step 5: check for convergence --
        if iterstep >= itermax and ediff > etol:
            print("\n[STOP] could not achieve convergence in"
                  " {} iterations\n".format(itermax))
            break
        elif ediff > etol:
            # -- Step 6: mix densities for faster convergence --
            r = np.linspace(tmin, tmax, nsteps_dense)
            den_mix = Spline(r, amix * den_new(r) + (1 - amix) * den_old(r))
            den_old = den_new
            etot_old = etot_new
        else:
            print("\n[STOP] convergence achieved in", iterstep, "steps\n")
            print("/------------------------------------\\")
            print("|     >>> final total energy <<<     |")
            print("|                                    |")
            print("|    E[n] = E_s[n] - E_H(SIC)[n]     |")
            print("|         = {:.4f} Ha               |".format(etot_new))
            print("\\------------------------------------/")
            break
def Compute_Linear_Covariance(self, comoving):
    if self.log.isEnabledFor(logging.INFO):
        self.log.info('Computation of prior covariance started')
    Covariance = np.zeros((np.size(comoving), np.size(comoving)))
    k = np.linspace(10**(-5), 10, 10**6)
    size = len(k) // 2
    Pk = self.Plin(k)
    fourier_coeff = np.abs(np.fft.fftn(Pk)[0:size + 1])
    frqs = np.linspace(0, 0.1 * size, size + 1)
    cf_lin = Spline(frqs, fourier_coeff)
    diff = np.zeros((np.size(comoving), np.size(comoving)))
    for i in range(np.size(comoving)):
        for j in range(np.size(comoving)):
            diff[i, j] = np.abs(comoving[i] - comoving[j])
    Covariance = cf_lin(diff)
    Covariance /= Covariance[0, 0]
    if self.log.isEnabledFor(logging.INFO):
        self.log.info('Prior Covariance computed')
    return Covariance
def update_transfer(self, path_transfer, column=None):
    transfer_function = camb2nbodykit(path_transfer, column=column)
    transfer_function /= self.max
    scales = camb2nbodykit(path_transfer, column=0)
    self.transfer_function = Spline(scales, transfer_function)
def __init__(self, cosmo, redshift, transfer, path_transfer=None, column=None):
    assert transfer in TRANSFERS
    self.transfer = transfer
    if self.transfer == 'CAMB':
        transfer_function = camb2nbodykit(path_transfer, column=column)
        self.max = np.max(transfer_function)
        transfer_function /= self.max
        scales = camb2nbodykit(path_transfer, column=0)
        self.transfer_function = Spline(scales, transfer_function)
        # set cosmology values
        self._sigma8 = cosmo.sigma8
        self.n_s = cosmo.n_s
        # spherical top-hat window function in Fourier space
        self.W_T = lambda x: 3 / x**3 * (np.sin(x) - x * np.cos(x))
        growth = cosmo.scale_independent_growth_factor(redshift)
        # normalize to proper sigma8
        self._norm = 1
        self._norm = (self._sigma8 / self._sigma_r(8.))**2 * growth**2
    else:
        self.power_spectrum = cosmology.LinearPower(cosmo, redshift,
                                                    transfer=self.transfer)
def calculate_full_load(full_load_speeds, full_load_powers, idle_engine_speed):
    """
    Calculates the full load curve.

    :param full_load_speeds:
        T1 map speed vector [RPM].
    :type full_load_speeds: numpy.array

    :param full_load_powers:
        T1 map power vector [kW].
    :type full_load_powers: numpy.array

    :param idle_engine_speed:
        Engine speed idle median and std [RPM].
    :type idle_engine_speed: (float, float)

    :return:
        Vehicle full load curve, Maximum power [kW], Rated engine speed [RPM].
    :rtype: (scipy.interpolate.InterpolatedUnivariateSpline, float, float)
    """
    pn = np.array((full_load_speeds, full_load_powers))
    max_speed_at_max_power, max_power = pn[:, np.argmax(pn[1])]
    pn[1] /= max_power
    idle = idle_engine_speed[0]
    pn[0] = (pn[0] - idle) / (max_speed_at_max_power - idle)
    return Spline(*pn, ext=3), max_power, max_speed_at_max_power
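# A minimal usage sketch for calculate_full_load with made-up T1 map values;
# only numpy and the `Spline` alias (scipy's InterpolatedUnivariateSpline,
# matching the :rtype: above) are assumed.
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as Spline

speeds = np.array([1000., 2000., 3000., 4000., 5000.])  # [RPM]
powers = np.array([20., 60., 95., 110., 100.])          # [kW]
flc, p_max, n_rated = calculate_full_load(speeds, powers, (750., 50.))
# The returned curve maps idle-normalized speed to power normalized by p_max.
print(p_max, n_rated, flc(0.5))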
def redshift_from_distance(cosmo, lg_num_sample=5):
    """Invert the redshift-to-distance relationship of a cosmological model
    to obtain redshift-from-distance.

    Notes
    -----
    This is useful when the Alcock--Paczynski effect needs to be included
    in modelling.  Only valid for redshift between 1.e-3 and 100.

    Parameters
    ----------
    cosmo : :class:`nbodykit.cosmology.cosmology.Cosmology`
        Cosmological model.
    lg_num_sample : float, optional
        Base-10 logarithm of the number of redshift points at which to
        sample the comoving distance as a function of redshift (default
        is 5, i.e. 100000 sample points).

    Returns
    -------
    callable
        Redshift-from-distance function.

    """
    Z_LOG_RANGE = (-3, 2)

    z_samples = np.logspace(*Z_LOG_RANGE, num=10**lg_num_sample)
    r_samples = cosmo.comoving_distance(z_samples)

    return Spline(r_samples, z_samples, ext='raise')
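# A minimal usage sketch, assuming nbodykit is installed; Planck15 is one of
# its bundled cosmologies.
from nbodykit import cosmology

z_from_r = redshift_from_distance(cosmology.Planck15)
r = cosmology.Planck15.comoving_distance(1.0)
print(z_from_r(r))   # recovers z = 1.0 to interpolation accuracy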
def spline(x, y, x_eval=None, weights=None, smooth=None):
    """Smoothing spline fit.

    The fit and smoothness depend on (i) the number of samples, (ii) the
    variance and (iii) the error of each data point.

    Parameters
    ----------
    x, y : array-like
        The data to fit: y(x).
    x_eval : array-like, optional
        The points at which to evaluate the fitted spline. If not given,
        the spline is evaluated on the original x's.
    weights : array-like
        Array with 1/std of each data point. Note: to reflect the
        heteroskedasticity, use 1/moving-window-std as weight values.
    smooth : float, optional
        Smoothing parameter. If weights are given, then s = len(weights).
    """
    ind, = np.where((~np.isnan(x)) & (~np.isnan(y)))
    x2, y2 = x[ind], y[ind]
    if weights is not None:
        weights = weights[ind]
    if x_eval is None:
        x_eval = x
    return Spline(x2, y2, w=weights, s=smooth)(x_eval)
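# A minimal usage sketch with synthetic noisy data; `Spline` is assumed to be
# scipy's UnivariateSpline, consistent with the w/s keywords used above.
import numpy as np

x = np.linspace(0, 10, 100)
y = np.sin(x) + 0.1 * np.random.randn(x.size)
y_smooth = spline(x, y, smooth=len(x))   # larger `smooth` -> smoother fit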
def calculate_service_battery_currents_v1(
        service_battery_capacity, times, service_battery_state_of_charges):
    """
    Calculate the service battery current vector [A].

    :param service_battery_capacity:
        Service battery capacity [Ah].
    :type service_battery_capacity: float

    :param times:
        Time vector [s].
    :type times: numpy.array

    :param service_battery_state_of_charges:
        State of charge of the service battery [%].
    :type service_battery_state_of_charges: numpy.array

    :return:
        Service battery current vector [A].
    :rtype: numpy.array
    """
    from scipy.interpolate import UnivariateSpline as Spline
    soc = service_battery_state_of_charges
    ib = Spline(times, soc, w=np.tile(10, times.shape[0])).derivative()(times)
    return ib * (service_battery_capacity * 36.0)
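# Unit check for the 36.0 factor above: the spline derivative gives d(SOC)/dt
# in [%/s], so
#   I [A] = capacity [Ah] * 3600 [s/h] / 100 [%] * d(SOC)/dt
#         = capacity * 36.0 * d(SOC)/dt,
# which is what the return statement computes.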
def normalize(energy, veff):
    """Integrate with the calculated energy eigenvalue and normalize."""
    u, r = solve_rad_seq(energy, veff)
    u_spline = Spline(r, u * u)
    norm = trapezoidal(u_spline, r[0], r[-1])
    u2 = u * u / norm
    return u2, r, norm
def gaussian_poly_extrap(kout, kint, pint, frac=1):
    '''
    Extrapolates beyond the end of kint by a damped polynomial in kint
    (i.e. Hermite). Does nothing on the low-k end.

    The extrapolation form is

        (A + B k) * exp(-k^2/k0^2)

    where k0 is taken to be some fraction (default 1) of the final
    element of kint.
    '''
    # Solve for the coefficients A and B from the last two tabulated points.
    k1, k2 = kint[-2], kint[-1]
    p1, p2 = pint[-2], pint[-1]
    k0 = frac * k2

    B = (p2 * np.exp(k2**2 / k0**2) - p1 * np.exp(k1**2 / k0**2)) / (k2 - k1)
    A = p2 * np.exp(k2**2 / k0**2) - B * k2

    # Interpolate/extrapolate
    ret = np.zeros_like(kout)
    extrap_iis = (kout > k2)
    ret[~extrap_iis] = Spline(kint, pint)(kout[~extrap_iis])
    ret[extrap_iis] = ((A + B * kout) * np.exp(-kout**2 / k0**2))[extrap_iis]

    return ret
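# A minimal usage sketch with a toy power-law spectrum; `Spline` is assumed to
# be scipy's InterpolatedUnivariateSpline, matching its use in the function.
import numpy as np

kint = np.logspace(-3, 0, 100)   # tabulated k, up to k = 1
pint = kint**(-1.5)              # made-up spectrum
kout = np.logspace(-3, 1, 200)   # extends a decade past the table

pout = gaussian_poly_extrap(kout, kint, pint)
# By construction the damped polynomial passes through the last two tabulated
# points, so the curve is continuous at k = kint[-1].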
def interpolate_bb_temps(self):
    """Interpolate the BB H/K temperatures over the whole dataframe.

    This is necessary, as the frequency of the measurements is too low to
    have meaningful mean values during the BB-views. Also, bb_2_temps are
    measured so rarely that there might not be a measurement at all during
    a bb-view.
    """
    # just a shortcutting reference
    df = self.df

    # bb_1_temp is much more often sampled than bb_2_temp
    bb1temps = df.bb_1_temp.dropna()
    bb2temps = df.bb_2_temp.dropna()

    # converting to float because the fitting libraries want floats
    all_times = df.index.values.astype('float64')

    # Loop over both temperature arrays [D.R.Y. principle!].
    # The number of data points in bb1temps is much higher, but for
    # consistency we should interpolate both the same way.
    for bbtemp in [bb1temps, bb2temps]:
        # converting the time series to floats for interpolation
        ind = bbtemp.index.values.astype('float64')
        # s=0.0 means no deviation from measured points is allowed for the
        # spline; k=1 means a locally linear fitted spline between points
        temp_interpolator = Spline(ind, bbtemp, s=0.0, k=1)
        # get new temperatures at all_times
        df[bbtemp.name + '_interp'] = temp_interpolator(all_times)
def main():
    print("\n/-----------------------------------------------\\")
    print("| Ass.5.1: Hartree energy for H-atom GS density |")
    print("\\-----------------------------------------------/\n")
    print("[INFO] using h = {} for numerically solving ODEs".format(h))

    # use secant method for finding the root in u_0(E)
    # integration returns (u(r_i), r_i) --> u(r_0) = solver(E)[0][0]
    root = newton(lambda en0: solve_rseq(en0)[0][0], -2.0)
    print("[INFO] found energy eigenvalue @ E = {:.5f} Ha".format(root))

    # integrate again using the correct energy
    u, r = solve_rseq(root)

    # normalize u**2 to 1
    norm = trapz(u ** 2, r)
    u /= np.sqrt(norm)
    print("[INFO] normalizing |u(r)|^2 from {:.5f} to 1".format(norm))

    # interpolate normalized u(r) using B-splines
    u_spl = Spline(r, u)

    # determine w(r) via Poisson's equation for single-orbital density n_s(r)
    w, r = solve_poisson(u_spl)

    # find hom. sol. w_hom(r) = a*r to match BC w(r_max) = q_tot = 1 for H-atom
    beta = (1 - w[-1]) / r[-1]
    w += beta * r
    print("[INFO] adding w_hom(r) = b * r --> b = {:.4f}".format(beta))
    w_spl = Spline(r, w)
    v_spl = Spline(r, w / r)

    # compute Hartree energy (verify integration interval choice in plot later)
    eh = 0.5 * trapz(w_spl(r) / r * u_spl(r) ** 2, r)
    print("[INFO] Hartree energy: {:.5f} Ha".format(eh))

    # compare numerical vs. exact Hartree potential energy function
    w_exact = lambda r: -(r + 1) * np.exp(-2 * r) + 1  # noqa
    v_exact = lambda r: w_exact(r) / r  # noqa
    plt.plot(r, w_spl(r), lw=2, ls="--", label=r"$w(r)$ num")
    plt.plot(r, v_spl(r), lw=2, ls="--", label=r"$v(r)$ num")
    plt.plot(r, w_exact(r), lw=3, alpha=0.5, label=r"$w(r)$ exact")
    plt.plot(r, v_exact(r), lw=3, alpha=0.5, label=r"$v(r)$ exact")

    # integrand from Hartree energy --> integrating up to r=10 is sufficient
    label = r"$v(r) \, |u(r)|^2$"
    plt.plot(r, v_spl(r) * u_spl(r) ** 2, alpha=0.5, lw=3, label=label)
    plt.xlabel("r in Bohr")
    plt.legend(loc="best", fancybox=True, shadow=True)
    plt.show()
def _interp(r, y, nr):
    from scipy.interpolate import InterpolatedUnivariateSpline as Spline
    n = len(r)
    if n == 0:
        return np.nan * nr
    elif n == 1:
        return (y / r) * nr
    else:
        return Spline(r, y / r, k=1)(nr) * nr
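# A minimal usage sketch; interpolating y/r and rescaling by the new radii
# keeps the result exactly proportional to r wherever y itself is. The input
# values are made up for illustration.
import numpy as np

r = np.array([1.0, 2.0, 3.0])
y = 2.0 * r                      # y proportional to r
nr = np.linspace(0.5, 3.5, 7)
print(_interp(r, y, nr))         # recovers 2.0 * nr, even outside [1, 3]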
def smooth_spline(data, key, k, smooth):
    import numpy as np
    from scipy.interpolate import UnivariateSpline as Spline

    span = max(data[key]) - min(data[key])
    y = np.array(data[key])
    x = np.linspace(0, 1, num=len(y), endpoint=False)
    spl = Spline(x, y, k=k, s=smooth * span * k)
    return spl
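# A minimal usage sketch; `data` can be any mapping from key to a sequence of
# values (dict, DataFrame, ...), and the spline is parameterized on [0, 1)
# rather than on a physical x-axis. The values here are made up.
import numpy as np

data = {'flux': np.sin(np.linspace(0, 6, 50)) + 0.1 * np.random.randn(50)}
spl = smooth_spline(data, 'flux', k=3, smooth=0.5)
smoothed = spl(np.linspace(0, 1, 200, endpoint=False))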
def hotqcd_e3p_T4(T, *args, **kwargs):
    """
    Evaluate the trace anomaly (e-3p)/T^4 for the HotQCD EOS.
    See Eq. (5) in http://inspirehep.net/record/1307761:

        (e-3p)/T^4 = T * d/dT(p/T^4)

    """
    # numerically differentiate via an interpolating spline
    spl = Spline(T, hotqcd_p_T4(T, *args, **kwargs))
    return T * spl(T, nu=1)
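# A quick sanity check of the spline-derivative pattern used above, on a
# function with a known derivative (hotqcd_p_T4 itself is defined elsewhere);
# in this sketch `Spline` is scipy's InterpolatedUnivariateSpline.
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as Spline

T = np.linspace(0.1, 1.0, 200)
spl = Spline(T, T**4)                        # stand-in for p/T^4
err = np.max(np.abs(spl(T, nu=1) - 4 * T**3))
print(err)   # small: the spline derivative tracks d/dT(T^4) = 4 T^3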
def interpolate_caldata_worker(self, offset_times, bbcal_times, all_times):
    sdata = get_data_columns(self.df)
    # create 2 new pd.DataFrames to hold the interpolated gains and offsets
    offsets_interp = pd.DataFrame(index=sdata.index)
    gains_interp = pd.DataFrame(index=sdata.index)

    for det in thermal_detectors:
        # change k for the kind of fit you want
        s_offset = Spline(offset_times, self.offsets[det], s=0.0,
                          k=self.calfitting_order)
        s_gain = Spline(bbcal_times, self.gains[det], s=0.0,
                        k=self.calfitting_order)
        offsets_interp[det] = s_offset(all_times)
        gains_interp[det] = s_gain(all_times)

    return offsets_interp, gains_interp
def get_full_load(ignition_type):
    """
    Returns vehicle full load curve.

    :param ignition_type:
        Engine ignition type (positive or compression).
    :type ignition_type: str

    :return:
        Vehicle normalized full load curve.
    :rtype: scipy.interpolate.InterpolatedUnivariateSpline
    """
    return Spline(*dfl.functions.get_full_load.FULL_LOAD[ignition_type], ext=3)
def compute_xi_real(self, rr, b1, b2, bs, b3, alpha, alpha_v, alpha_s0,
                    alpha_s2, s2fog):
    '''
    Compute the real-space correlation function at rr.
    '''
    # This is just the zeroth moment:
    xieft = self.ximatter + b1 * self.xitable[:, 1] + b1**2 * self.xitable[:, 2] \
        + b2 * self.xitable[:, 3] + b1 * b2 * self.xitable[:, 4] \
        + b2**2 * self.xitable[:, 5] \
        + bs * self.xitable[:, 6] + b1 * bs * self.xitable[:, 7] \
        + b2 * bs * self.xitable[:, 8] \
        + bs**2 * self.xitable[:, 9] + b3 * self.xitable[:, 10] \
        + b1 * b3 * self.xitable[:, 11] + alpha * self.xict

    xir = Spline(self.rint, xieft)(rr)

    return xir