def test_surface_sampling(self):
    np.random.seed(1)
    nquadpoints = int(4e2)
    surface = SurfaceRZFourier(nfp=1, stellsym=True, mpol=1, ntor=0,
                               quadpoints_phi=nquadpoints,
                               quadpoints_theta=nquadpoints)
    dofs = surface.get_dofs()
    dofs[0] = 1
    dofs[1] = 0.8
    surface.set_dofs(dofs)
    n = np.linalg.norm(surface.normal(), axis=2)
    print(np.min(n), np.max(n))
    start = int(0.2*nquadpoints)
    stop = int(0.5*nquadpoints)

    from scipy.integrate import simpson
    quadpoints_phi = surface.quadpoints_phi
    quadpoints_theta = surface.quadpoints_theta
    lineintegrals = [simpson(y=n[i, start:stop], x=quadpoints_theta[start:stop])
                     for i in range(start, stop)]
    area_of_subset = simpson(y=lineintegrals, x=quadpoints_phi[start:stop])
    total_area = surface.area()
    print("area_of_subset/total_area", area_of_subset/total_area)

    nsamples = int(1e6)
    xyz, idxs = draw_uniform_on_surface(surface, nsamples, safetyfactor=10)
    samples_in_range = np.sum((idxs[0] >= start) * (idxs[0] < stop)
                              * (idxs[1] >= start) * (idxs[1] < stop))
    print("samples_in_range/nsamples", samples_in_range/nsamples)
    print("fraction of samples if uniform", (stop-start)**2/(nquadpoints**2))
    assert abs(samples_in_range/nsamples - area_of_subset/total_area) < 1e-2
def bounded_area(x, y):
    # Shift the curve so that it is non-negative before splitting it
    # (avoid mutating the caller's array in place).
    y = y + np.abs(np.amin(y))
    up_y, up_x = [], []
    low_y, low_x = [], []
    upper = True
    for xx, yy in zip(x, y):
        if upper:
            up_y.append(yy)
            up_x.append(xx)
        else:
            low_y.append(yy)
            low_x.append(xx)
        # Switch from the upper to the lower branch once the rightmost
        # point has been passed.
        if xx == np.amax(x):
            upper = False
    up = integrate.simpson(y=up_y, x=up_x)
    low = integrate.simpson(y=low_y, x=low_x)
    return up - np.abs(low)
def test_curve_sampling(self):
    np.random.seed(1)
    nquadpoints = int(1e5)
    curve = CurveRZFourier(nquadpoints, 1, 1, True)
    dofs = curve.get_dofs()
    dofs[0] = 1
    dofs[1] = 0.9
    curve.set_dofs(dofs)
    l = curve.incremental_arclength()
    print(np.min(l), np.max(l))
    start = int(0.1*nquadpoints)
    stop = int(0.5*nquadpoints)
    quadpoints = curve.quadpoints

    from scipy.integrate import simpson
    length_of_subset = simpson(y=l[start:stop], x=quadpoints[start:stop])
    total_length = simpson(y=l, x=quadpoints)
    print("length_of_subset/total_length", length_of_subset/total_length)

    nsamples = int(1e6)
    xyz, idxs = draw_uniform_on_curve(curve, nsamples, safetyfactor=10)
    samples_in_range = np.sum((idxs >= start) * (idxs < stop))
    print("samples_in_range/nsamples", samples_in_range/nsamples)
    print("fraction of samples if uniform", (stop-start)/(nquadpoints))
    assert abs(samples_in_range/nsamples - length_of_subset/total_length) < 1e-3
def compute_refpost_on_grid(d, y_obs, theta_grid, tt, pdf_prior, density, bar=True):
    # compute likelihoods for each dimension of d
    like_pdfs = list()
    for idx in tqdm(range(d.shape[0]), disable=not bar):
        ys_d = y_obs[idx]
        prediction = theta_grid[:, 0] + theta_grid[:, 1] * d[idx][0]
        diff = ys_d - prediction
        pdf = density(diff)
        like_pdfs.append(pdf)
    like_pdfs = np.array(like_pdfs)

    # multiply likelihoods to get joint; assume independence across dims
    joint_like = np.prod(like_pdfs.T, axis=1)

    # manually normalize over grid and compute posterior
    prod = joint_like * pdf_prior
    Z = integrate.simpson(
        integrate.simpson(prod.reshape(len(tt), len(tt)), tt, axis=1), tt)
    pdf_post = prod / Z
    return pdf_post
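# Hedged aside (illustrative, not part of the original code): the
# normalization above approximates a 2-D integral over the (tt x tt) grid by
# two nested 1-D Simpson passes. A minimal self-contained sketch of the same
# pattern, assuming only numpy and scipy:
import numpy as np
from scipy import integrate

tt_demo = np.linspace(-3.0, 3.0, 101)
X, Y = np.meshgrid(tt_demo, tt_demo)
f = np.exp(-(X**2 + Y**2) / 2.0)  # unnormalized 2-D density on the grid
# Integrate over axis 1 first, then over the remaining axis.
Z_demo = integrate.simpson(integrate.simpson(f, x=tt_demo, axis=1), x=tt_demo)
print(Z_demo)  # should be close to 2*pi for this Gaussian example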
def make_J(fk, flam, fmu, k_grid, lambda_grid, mu_grid):
    Q = len(flam)
    glam_sparse = simpson(make_single_f_tilde(fk, flam, k_grid, lambda_grid),
                          x=k_grid, axis=0, even='avg')
    offset_lam = flam * np.log(
        np.abs((np.cos(lambda_grid) + 1) / (np.cos(lambda_grid) - 1)))
    glam_sparse = glam_sparse + offset_lam
    gmu_sparse = simpson(make_single_f_tilde(fk, fmu, k_grid, mu_grid),
                         x=k_grid, axis=0, even='avg')
    offset_mu = fmu * np.log(
        np.abs((np.cos(mu_grid) + 1) / (np.cos(mu_grid) - 1)))
    gmu_sparse = gmu_sparse + offset_mu
    J_sparse = np.kron(glam_sparse, np.ones((Q, 1)))
    J_sparse = J_sparse - np.kron(gmu_sparse, np.ones((Q, 1))).T
    J_sparse = J_sparse / (
        np.kron(np.cos(lambda_grid).reshape((Q, 1)), np.ones((1, Q))) -
        np.kron(np.ones((Q, 1)), np.cos(mu_grid).reshape((1, Q)))).T
    J_sparse = J_sparse * make_v_lam(flam, lambda_grid) * np.pi / Q
    return J_sparse
def func(z0, a):
    z = emis_av / np.max(emis_av)
    z_lim = np.where(z >= z0, z, 0)
    vol = integrate.simpson(2 * np.pi * xau_ax * np.average(z_lim, axis=1),
                            xau_ax)
    vol_tot = integrate.simpson(2 * np.pi * xau_ax * np.average(z, axis=1),
                                xau_ax)
    # print(vol/vol_tot)
    return vol / vol_tot - a
def oscillator_integral(parameters, n, sw, offset=None):
    """Determines the (absolute) integral of the Fourier transform of an
    oscillator.

    Parameters
    ----------
    parameters : numpy.ndarray
        Oscillator parameters of the following form:

        * **1-dimensional data:**

          .. code:: python

             parameters = numpy.array([a, φ, f, η])

        * **2-dimensional data:**

          .. code:: python

             parameters = numpy.array([a, φ, f1, f2, η1, η2])

    n : [int], [int, int]
        Number of points to construct the signal from in each dimension.

    sw : [float], [float, float]
        Sweep width in each dimension, in Hz.

    offset : [float], [float, float], or None, default: None
        Transmitter offset frequency in each dimension, in Hz. If set to
        `None`, the offset frequency will be set to 0 Hz in each dimension.

    Returns
    -------
    integral : float
        The absolute integral of the oscillator's spectrum.

    Notes
    -----
    The integration is performed using the composite Simpson's rule, provided
    by `scipy.integrate.simpson <https://docs.scipy.org/doc/scipy/reference/\
generated/scipy.integrate.simpson.html#scipy.integrate.simpson>`_.

    Spacing of points along the frequency axes is set to `1` (i.e. `dx = 1`).
    """
    fid, _ = make_fid(np.expand_dims(parameters, axis=0), n, sw, offset)
    spectrum = np.absolute(ft(fid))
    # Integrate out one axis at a time. After each pass the leading axis of
    # the reduced array is the next one to integrate over, so always reduce
    # axis 0 (integrating axis=axis on the shrinking array would fail for
    # 2-dimensional data).
    integral = spectrum
    for _ in range(spectrum.ndim):
        integral = integrate.simpson(integral, axis=0)
    return integral
def calculate_f_xuv(spectrum):
    """
    Calculates the total XUV flux given the spectrum at the planet (Equation
    14 of Vissapragada et al. 2022), where the minimum threshold is 13.6 eV.
    This function currently assumes the spectrum is truncated at 13.6 eV and
    does not include lower energies.

    Parameters
    ----------
    spectrum (``dict``):
        Spectrum of the host star arriving at the planet, covering fluxes up
        to the wavelength corresponding to the energy to ionize hydrogen
        (13.6 eV, or 911.65 Angstrom). Can be generated using
        ``tools.make_spectrum_dict`` or ``tools.generate_muscles_spectrum``.
        Currently we assume that the spectrum does not include energies lower
        than 13.6 eV.

    Returns
    -------
    f_xuv (``astropy.Quantity``):
        The integrated XUV flux.
    """
    wav_grid = spectrum['wavelength'] * spectrum['wavelength_unit']
    flux_grid = spectrum['flux_lambda'] * spectrum['flux_unit']
    flux_grid = flux_grid.to(u.erg / u.s / u.cm / u.cm / u.Hz,
                             equivalencies=u.spectral_density(wav_grid))
    # Reverse so that the frequency grid is ascending before integrating.
    wavs_hz = wav_grid.to(u.Hz, equivalencies=u.spectral())[::-1]
    flux_grid = flux_grid[::-1]
    f_xuv = simpson(flux_grid, x=wavs_hz) * u.erg / u.s / u.cm**2
    return f_xuv
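# Hedged aside (illustrative, not from the source): simpson follows the sign
# of the sample ordering, so integrating against a descending grid flips the
# sign of the result. That is why the wavelength-to-frequency grids above are
# reversed with [::-1] before integrating. A minimal demonstration:
import numpy as np
from scipy.integrate import simpson

x = np.linspace(1.0, 2.0, 51)
y = x**2
print(simpson(y, x=x))              # ~ 7/3
print(simpson(y[::-1], x=x[::-1]))  # ~ -7/3: descending grid negates the result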
def _guessimate_to_tds_weight(self, t, w, r) -> List[float]:
    target_tds_weight = self.shot.bw * self.shot.tds / 100.0
    t_begin = t[0]
    t_end = t[-1]
    t_span = t_end - t_begin

    def puck_degradation(t: float) -> float:
        return 1.0 - 0.7 / t_span * (t - t_begin)

    integral_cumul = [0.0] + list(v for v in integration.cumtrapz(w, t))
    if not eq_within(self.shot.bw, integral_cumul[-1], self.shot.bw * 0.2):
        raise RuntimeError(
            'Too much error between beverage weight: %.02fg, cumulative weight: %.02fg'
            % (self.shot.bw, integral_cumul[-1]))

    tds_weight_curve = list()
    for idx, (w0, w1) in enumerate(zip(integral_cumul[:-1], integral_cumul[1:])):
        dw = w1 - w0
        dt = t[idx + 1] - t[idx]
        # take ratio of tds in the current weight "shard"
        ratio = dw / integral_cumul[-1]
        tds_weight_curve.append(ratio * target_tds_weight / dt *
                                (r[idx]**0.5) * puck_degradation(t[idx]))
    tds_weight_degraded = integration.simpson(tds_weight_curve, t[1:])
    weight_scale = tds_weight_degraded / target_tds_weight
    return Analysis._smoothing(
        [0.0] + [w / weight_scale for w in tds_weight_curve])
def solve_method_2(input_):
    t = 1e-6

    def Omega(_t: float, *args) -> float:
        if _t <= 0 or _t >= t:
            return 0
        # scaling = 1 / 100
        scaling = input_
        return np.sin(_t / t * np.pi) * scaling

    t_list = np.linspace(0, t, 500)
    Omegas = np.array([Omega(_t) for _t in t_list])
    area = simpson(Omegas, t_list)
    print(f"Area: {area}")

    time_independent_terms = Qobj(
        np.zeros((3, 3)) + Vdd * 1e9 * rcrt @ rcrt.T)
    Omega_coeff_terms = Qobj((rcrt @ rc1t.T + rc1t @ rcrt.T) / 2)
    solver = mesolve(
        [time_independent_terms, [Omega_coeff_terms, Omega]],
        psi_0,
        t_list,
        options=Options(store_states=True, nsteps=20000),
    )
    c_r1 = np.abs(solver.states[-1].data[1, 0])
    return c_r1
def get_mjs(self, xe_func, n_simpson=1000):
    """Returns 1d numpy array of shape (npc,) for pc amplitudes.

    Args:
        xe_func: a function for the global ionization history xe(z),
            taking redshift z as the input argument, valued on z = [6, 30].
        n_simpson (optional): an integer for the number of z intervals to
            use for the integration with the Simpson rule (if an odd number
            is given, we add one automatically to make it even).
    """
    npc = self.pc_data.npc
    # Simpson's rule pairs sub-intervals, so force an even interval count.
    if (n_simpson % 2 == 1):
        n_simpson = n_simpson + 1
    zarray = np.linspace(self.pc_data.zmin, self.pc_data.zmax, n_simpson + 1)
    xe_fid_array = self.pc_data.xe_fid_func(zarray)
    xe_array = xe_func(zarray)
    mjs = np.zeros(npc)
    for j in range(npc):
        xe_mj_array = self.pc_data.xe_mjs_func[j](zarray)
        integrand = xe_mj_array * (xe_array - xe_fid_array)
        mjs[j] = integrate.simpson(integrand, zarray)
    mjs = mjs / (self.pc_data.zmax - self.pc_data.zmin)
    return mjs
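# Hedged aside (illustrative, not from the source): Simpson's rule fits a
# parabola over each pair of sub-intervals, so it is most natural with an
# even number of intervals, i.e. an odd number of sample points. That is why
# get_mjs rounds n_simpson up to an even value and samples n_simpson + 1
# points. A quick sanity check on a smooth integrand:
import numpy as np
from scipy import integrate

z = np.linspace(0.0, np.pi, 201)          # 200 intervals -> 201 points
print(integrate.simpson(np.sin(z), x=z))  # ~ 2.0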
def spec_av_cross(r_grid, spectrum, t_coef, species):
    """
    Calculates the heating cross-section for photoionization using Equation
    (16) of Vissapragada et al. (2022).

    Parameters
    ----------
    r_grid (``numpy.ndarray``):
        The radius grid for the calculation. An astropy unit (like u.Rjup)
        must be specified for each value on the grid.

    spectrum (``dict``):
        Spectrum of the host star arriving at the planet, covering fluxes up
        to the wavelength corresponding to the energy to ionize hydrogen
        (13.6 eV, or 911.65 Angstrom). Can be generated using
        ``tools.make_spectrum_dict`` or ``tools.generate_muscles_spectrum``.
        Currently we assume that the spectrum does not include energies lower
        than 13.6 eV.

    t_coef (``numpy.ndarray``):
        The transmission coefficient profile for the wind as a function of
        frequency and altitude. In the optically-thin part of the outflow
        this should be very close to 1.

    species (``str``):
        The photoionization target for which we are calculating the heating
        cross-section. Must be one of 'hydrogen', 'helium', or 'helium+'.

    Returns
    -------
    cross (``astropy.Quantity``):
        Heating cross-section in cm**2 for the selected species.
    """
    wav_grid = spectrum['wavelength'] * spectrum['wavelength_unit']
    flux_grid = spectrum['flux_lambda'] * spectrum['flux_unit']
    flux_grid = flux_grid.to(u.erg / u.s / u.cm / u.cm / u.Hz,
                             equivalencies=u.spectral_density(wav_grid))
    wavs_hz = wav_grid.to(u.Hz, equivalencies=u.spectral())[::-1]
    flux_grid = flux_grid[::-1]
    xx, yy = np.meshgrid(wavs_hz, r_grid)
    threshold = threshes[species]
    crosses = {
        'hydrogen': h_photo_cross,
        'helium': helium_photo_cross,
        'helium+': heplus_photo_cross
    }
    cross = crosses[species]
    evgrid = xx.to(u.eV, equivalencies=u.spectral())
    eta_grid = 1 - threshold / evgrid
    spec_grid, __ = np.meshgrid(flux_grid, r_grid)
    crossgrid = cross(xx)
    # Zero the cross-section below the ionization threshold.
    crossgrid[evgrid < threshold] = 0. * u.cm**2
    numgrid = eta_grid * spec_grid * crossgrid * t_coef
    numgrid = numgrid.to(u.erg / u.s / u.Hz)
    num = simpson(numgrid, x=wavs_hz, axis=-1) * u.erg / u.s
    F_XUV = calculate_f_xuv(spectrum)
    cross = num / F_XUV
    return cross.to(u.cm**2)
def TDI(_data):
    N = len(_data)
    if len(_data.shape) == 1:
        _data = _data.reshape((N, 1))
    _data = zmean(_data)
    _dataout = np.zeros_like(_data)
    # Seed the first sample with a half-interval trapezoid estimate, then
    # integrate each growing prefix with Simpson's rule.
    _dataout[0, :] = _data[0, :] * dt / 2
    for ii in range(1, N):
        _dataout[ii, :] = intg.simpson(_data[0:ii, :], dx=dt, axis=0)
    return _dataout
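# Hedged aside (an alternative, not the author's method): re-running Simpson
# on every growing prefix makes TDI O(N^2). If trapezoidal accuracy is
# acceptable, scipy.integrate.cumulative_trapezoid computes all running
# integrals in a single O(N) pass. dt_demo below is a hypothetical spacing:
import numpy as np
from scipy.integrate import cumulative_trapezoid

data = np.random.default_rng(0).standard_normal((1000, 2))
dt_demo = 0.01  # hypothetical sample spacing
running = cumulative_trapezoid(data, dx=dt_demo, axis=0, initial=0)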
def sigma_8_log_spaced(P, k=None):
    R = 8.0  # Mpc h^-1

    def W(k, R):
        return 3.0 * (np.sin(k * R) - k * R * np.cos(k * R)) / (k * R)**3

    # natural log here! (input Pk must be log-spaced!)
    dlogk = np.log(k[1] / k[0])
    input_sigma_8 = np.sqrt(
        simpson(P * k**3 * W(k, R)**2 / (2 * np.pi**2), dx=dlogk))
    return input_sigma_8
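# Hedged aside (illustrative, not from the source): integrating on a
# log-spaced grid with a constant dx = dln k works because dk = k dln k, so an
# integrand g(k) dk becomes k*g(k) dln k. This is why the sigma_8 integrand
# above carries k^3 rather than k^2. Quick sanity check for g(k) = k:
import numpy as np
from scipy.integrate import simpson

k = np.logspace(-2, 1, 513)         # log-spaced grid
dlnk = np.log(k[1] / k[0])
exact = (k[-1]**2 - k[0]**2) / 2.0  # integral of k dk
approx = simpson(k * k, dx=dlnk)    # integrand times the extra factor of k
print(exact, approx)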
def printAll(self):
    out = self.problem.output_folder + "/Energies.dat"
    with open(out, 'w') as fu:
        st = "Kin\tsum(eps)\tint(rho*v)\n"
        fu.write(st)
        int_rhov = integrate.simpson(
            self.problem.tab_rho * self.potential * self.grid**2,
            self.grid) * 4. * np.pi
        st = "{t:.5f}\t{eps:.5f}\t{integ:.5f}\n".format(
            t=self.problem.kinetic, eps=self.sum_eigenvalues, integ=int_rhov)
        fu.write(st)
    return self.problem.kinetic, self.sum_eigenvalues, int_rhov
def simpson_integral_df(waveforms, dx=1):
    '''
    waveforms: the waveforms collection DataFrame
    dx: the interval between the y-values
    '''
    integrals = []
    for i in range(waveforms.shape[1]):
        event = waveforms[waveforms.columns[i]]
        integral = simpson(y=event, dx=dx)
        integrals.append(integral)
    return pd.DataFrame(integrals, index=waveforms.columns)
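# Hedged usage sketch (not from the source), assuming the function above is
# in scope: one column per waveform, one integral per column in the result.
import numpy as np
import pandas as pd

waveforms_demo = pd.DataFrame({
    'event_0': np.sin(np.linspace(0, np.pi, 101)),
    'event_1': np.ones(101),
})
# With dx = pi/100 the first column integrates to ~2, the second to ~pi.
print(simpson_integral_df(waveforms_demo, dx=np.pi / 100))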
def loop_simps_riemann(y):
    # data needs to be column arrays; this needs fixing in labview
    riemannarray = []
    simpsonarray = []
    for counter, row in enumerate(y):
        riemannval = riemann(0, len(y[counter]), y[counter], 1)
        simpsonval = simpson(y[counter], np.arange(len(y[counter])))
        riemannarray.append(riemannval)
        simpsonarray.append(simpsonval)
    return [simpsonarray, riemannarray]
def compute_pred_post(y, d, y_T_grid, future_d, theta_grid, tt, pdf_prior,
                      density, bar=True):
    # compute reference posterior
    pdf_post = compute_refpost_on_grid(d, y.reshape(-1, 1), theta_grid_post,
                                       tt_post, pdf_prior_postgrid, density,
                                       bar=False)

    # compute posterior prediction
    no_noise = theta_grid[:, 0] + theta_grid[:, 1] * future_d[0][0]
    pred_post = list()
    for yt in tqdm(y_T_grid, disable=not bar):
        diff = yt - no_noise
        kde = density(diff)
        joint = kde * pdf_post
        pdf = integrate.simpson(
            integrate.simpson(joint.reshape(len(tt), len(tt)), tt, axis=1), tt)
        pred_post.append(pdf)
    pred_post = np.array(pred_post)

    # normalize posterior prediction
    Z_post = integrate.simpson(pred_post, y_T_grid)
    pred_post = pred_post / Z_post
    return pred_post
def integrate(spectrum, name, xmin, xmax, spec_type=None):
    """Integrate a sub-range of a spectrum.

    By integrating a spectrum in luminosity units between [xmin, xmax], it is
    possible to calculate the total luminosity of a given wavelength band. For
    example, by using xmin, xmax = 3000, 8000 Angstroms, the total optical
    luminosity can be estimated.

    This function uses Simpson's rule to approximate the integral given the
    wavelength/frequency bins (used as the sample points) and the luminosity
    bins.

    Parameters
    ----------
    spectrum: pypython.Spectrum
        The spectrum class containing the spectrum to integrate.
    name: str
        The name of the spectrum to integrate, i.e. "60", "Emitted".
    xmin: float
        The lower integration bound, in Angstroms.
    xmax: float
        The upper integration bound, in Angstroms.
    spec_type: str [optional]
        The spectrum type to use. If this is None, then spectrum.current is
        used.

    Returns
    -------
    The integral of the spectrum between xmin and xmax.
    """
    if spec_type:
        key = spec_type
    else:
        key = spectrum.current

    if spectrum[key].units == SpectrumUnits.l_lm or \
            spectrum[key].units == SpectrumUnits.f_lm:
        sample_points = spectrum[key]["Lambda"]
    else:
        sample_points = spectrum[key]["Freq."]
        # The bounds are given in Angstroms, so convert them to Hz (which
        # also swaps the lower and upper bounds).
        tmp = xmin
        xmin = angstrom_to_hz(xmax)
        xmax = angstrom_to_hz(tmp)

    sample_points, y = get_xy_subset(sample_points, spectrum[key][name],
                                     xmin, xmax)

    return simpson(y, sample_points)
def integrator(lb, rb, energy, pot_func):
    x, dx = np.linspace(lb, rb, 500, retstep=True)
    phi = np.zeros_like(x)
    phi[0] = 0
    phi[1] = 0.0001
    g = 2 * (energy - pot_func(x))
    f = 1 + ((dx**2) / 12) * g
    # Numerov recursion for phi'' = -g * phi
    for i in range(1, len(phi) - 1):
        phi[i + 1] = ((12 - 10 * f[i]) * phi[i] - f[i - 1] * phi[i - 1]) / f[i + 1]
    # normalize with Simpson's rule
    norm_factor = simpson(abs(phi)**2, x)
    if abs(norm_factor) < 1e-8:
        print("Normalization factor is zero, setting to 1")
        norm_factor = 1
    return x, phi / np.sqrt(norm_factor)
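# Hedged usage sketch (not from the source), assuming the integrator above is
# in scope: shoot the Numerov recursion for the harmonic oscillator,
# V(x) = x**2 / 2, at the ground-state energy E = 0.5 in these units, and
# check that the returned wavefunction comes back normalized.
import numpy as np
from scipy.integrate import simpson

x_demo, phi_demo = integrator(-6.0, 6.0, 0.5, lambda x: 0.5 * x**2)
print(simpson(np.abs(phi_demo)**2, x_demo))  # ~ 1.0 by construction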
def computeTangentsAndNormals(r, X, k=3, ds=1.0,
                              ref=np.array([0.0, 1.0, 0.0], dtype=np.float64)):
    finaldelta = np.linalg.norm(X[-1] - X[0], ord=2)
    Xaug = np.concatenate([X, X[0].reshape(1, -1)], axis=0)
    raug = np.concatenate([r, np.asarray([r[-1] + finaldelta])], axis=0)
    raug = raug - raug[0]
    garbagespline: interp.BSpline = interp.make_interp_spline(
        raug, Xaug, k=k, bc_type="periodic")
    garbagesplineder: interp.BSpline = garbagespline.derivative()
    # Re-parameterize by true arc length: integrate the spline speed over
    # each parameter interval with Simpson's rule.
    truedistances: np.ndarray = np.zeros_like(raug)
    for i in range(1, truedistances.shape[0]):
        rsubsamp: np.ndarray = np.linspace(raug[i - 1], raug[i], num=5)
        velsubsamp: np.ndarray = np.linalg.norm(garbagesplineder(rsubsamp),
                                                ord=2, axis=1)
        truedistances[i] = truedistances[i - 1] + integrate.simpson(
            velsubsamp, x=rsubsamp)
    spline: interp.BSpline = interp.make_interp_spline(
        truedistances, Xaug, k=k, bc_type="periodic")
    tangentspline: interp.BSpline = spline.derivative(nu=1)
    rsamp = np.linspace(truedistances[0], truedistances[-1] - ds,
                        num=int(round(truedistances[-1] / ds)))
    points = spline(rsamp)
    tangents = tangentspline(rsamp)
    speeds = np.linalg.norm(tangents, ord=2, axis=1)
    unit_tangents = tangents / speeds[:, np.newaxis]
    # Build an orthonormal frame from the tangents and a fixed reference vector.
    numpoints = points.shape[0]
    ref_ = np.stack([ref for _ in range(numpoints)])
    v1 = np.cross(unit_tangents, ref_)
    v1 = v1 / np.linalg.norm(v1, axis=1, ord=2)[:, np.newaxis]
    v2 = np.cross(v1, unit_tangents)
    v2 = v2 / np.linalg.norm(v2, axis=1, ord=2)[:, np.newaxis]
    normals = np.cross(v2, unit_tangents)
    unit_normals = normals / np.linalg.norm(normals, axis=1, ord=2)[:, np.newaxis]
    return spline, points, speeds, unit_tangents, unit_normals, rsamp
def test_simpson(self):
    y = np.arange(17)
    assert_equal(simpson(y), 128)
    assert_equal(simpson(y, dx=0.5), 64)
    assert_equal(simpson(y, x=np.linspace(0, 4, 17)), 32)

    y = np.arange(4)
    x = 2**y
    assert_equal(simpson(y, x=x, even='avg'), 13.875)
    assert_equal(simpson(y, x=x, even='first'), 13.75)
    assert_equal(simpson(y, x=x, even='last'), 14)
def dln_sigma_inv_dM(k, P, M=None, Omega_M=None):
    """compute d ln sigma^-1 / dM analytically."""
    mean_rho = (rho_crit * Omega_M)  # in comoving units

    def dW_dM(k, R):
        x = k * R
        # no Delta_vir
        return k * M**(-2. / 3.) * (3.0 / (4.0 * np.pi * mean_rho))**(1. / 3.) * \
            (np.sin(x) / x**2 + 3.0 * np.cos(x) / x**3 - 3.0 * np.sin(x) / x**4)

    R = ((3.0 * M) / (4.0 * np.pi * mean_rho))**(1. / 3.)  # no Delta_vir
    integrand = P * k**2 * (2.0 * W(k, R) * dW_dM(k, R)) / (2.0 * np.pi**2)
    this_dln_sigma_R_inv_dM = simpson(integrand, x=k)
    return this_dln_sigma_R_inv_dM
def ecdf(xs, dens, time=False):
    from scipy.integrate import simpson
    from scipy.interpolate import UnivariateSpline as spline

    mn, mx = min(xs), max(xs)
    if time:
        xs = (xs - mn) / (mx - mn)
        areaUnderCurve = spline(xs, dens).integral(0, 1)
    else:
        areaUnderCurve = spline(xs, dens).integral(mn, mx)

    # Integrate the density up to and including each sample point; a single
    # point has zero area, so the CDF starts at 0 (integrating the slice
    # xs[:i] from i = 0 would hand simpson empty and length-1 arrays).
    cdf = [0.0]
    for i in range(2, len(xs) + 1):
        cdf.append(simpson(dens[:i], x=xs[:i]) / areaUnderCurve)
    return cdf
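# Hedged usage sketch (not from the source), assuming the ecdf above is in
# scope: the empirical CDF of a standard normal density sampled on a uniform
# grid should pass near 0.5 at x = 0 and approach 1 at the right edge.
import numpy as np

xs_demo = np.linspace(-4, 4, 201)
dens_demo = np.exp(-xs_demo**2 / 2) / np.sqrt(2 * np.pi)
cdf_demo = ecdf(xs_demo, dens_demo)
print(cdf_demo[len(cdf_demo) // 2], cdf_demo[-1])  # ~ 0.5, ~ 1.0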
def update_plot(q0):
    # Clear axes
    for ax_ in (ax, ax_ovlp):
        ax_.cla()

    HO_fin = HarmonicOscillator(offset=15, q0=q0)
    HOs = (HO_init, HO_fin)

    # Plot potentials
    for HO in HOs:
        ys = HO.pot(qs)
        # Truncate potentials
        ys[ys >= HO.offset + trunc_pot] = np.nan
        ax.plot(qs, ys)

    # Calculate and plot wavefunctions
    for v in vs:
        for HO in HOs:
            lvl = HO.level(v)
            ax.axhline(lvl, color="black", ls="--", lw=0.5)
            wf = HO.wf(qs, v)
            ax.plot(qs, wf + lvl)

    # Calculate overlaps between GS WF and ES wavefunctions
    overlaps = np.zeros_like(vs, dtype=float)
    for v in vs:
        wf = HO_fin.wf(qs, v)
        ovlp = simpson(wf_gs_init * wf, qs)**2
        overlaps[v] = ovlp
    ax_ovlp.stem(overlaps)

    # Label
    for v in vs:
        xy = (v, min(45, overlaps[v]))
        ax_ovlp.annotate(f"0-{v}", xy, ha="center")
    ax_ovlp.set_ylim(0, 1.0)
    ax_ovlp.set_title("Wavefunction overlaps")
    ax_ovlp.set_ylabel("Overlap")

    sqrt2 = 2**0.5
    ax.axvline(-sqrt2)
    ax.axvline(sqrt2)
    ax.set_ylim(0, 30)
    ax.set_xlabel("q")
    ax.set_ylabel(r"$\Delta E$")
    ax.set_title("Franck-Condon Principle")
def loop_integration(d_cols, y):
    # data needs to be column arrays; this needs fixing in labview
    riemannarray = []
    simpsonarray = []
    profilecoordsarray = []
    print(y[0])
    for counter, row in enumerate(y):
        riemannval = riemann(0, len(y[counter]), y[counter], 1)
        simpsonval = simpson(y[counter], np.arange(len(y[counter])))
        riemannarray.append(riemannval)
        simpsonarray.append(simpsonval)
    for element in d_cols:
        profilecoordsarray.append(element)
    return [profilecoordsarray, riemannarray, simpsonarray]
def get_area(scan, area_method, lorentz):
    if area_method not in AREA_METHODS:
        raise ValueError(f"Unknown area method: {area_method}.")

    if area_method == "fit_area":
        area_v = 0
        area_s = 0
        for name, param in scan["fit"].params.items():
            if "amplitude" in name:
                if param.stderr is None:
                    area_v = np.nan
                    area_s = np.nan
                else:
                    area_v += param.value
                    area_s += param.stderr
    else:  # area_method == "int_area"
        y_val = scan["counts"]
        x_val = scan[scan["scan_motor"]]
        y_bkg = scan["fit"].eval_components(x=x_val)["f0_"]
        area_v = simpson(y_val, x=x_val) - trapezoid(y_bkg, x=x_val)
        area_s = np.sqrt(area_v)

    if lorentz:
        # lorentz correction to area
        if scan["zebra_mode"] == "bi":
            twotheta = np.deg2rad(scan["twotheta"])
            corr_factor = np.sin(twotheta)
        else:  # zebra_mode == "nb"
            gamma = np.deg2rad(scan["gamma"])
            nu = np.deg2rad(scan["nu"])
            corr_factor = np.sin(gamma) * np.cos(nu)
        area_v = np.abs(area_v * corr_factor)
        area_s = np.abs(area_s * corr_factor)

    scan["area"] = (area_v, area_s)
def test_simps(self):
    # Basic coverage test for the alias
    y = np.arange(4)
    x = 2**y
    assert_equal(simpson(y, x=x, dx=0.5, even='first'),
                 simps(y, x=x, dx=0.5, even='first'))
def shgFROG(filename, initialGuess='gaussian', tau=None, method='copra',
            dt=None, maxIter=100, symmetrizeGrid=False,
            wavelengthLimits=[0, np.inf], gridSize=None,
            marginalCorrection=None):
    delays, wavelengths, trace = library_frog.unpack_data(filename,
                                                          wavelengthLimits)

    """ Recenter the trace to zero delay. Otherwise copra behaves weirdly """
    marginal_t = simpson(trace, wavelengths, axis=1)
    t_0 = delays[np.argmax(marginal_t)]
    delays -= t_0

    """ Removing negative values from trace. Seems to give slightly better results """
    trace[trace < 0] = 0

    """ PCGPA algorithm requires a symmetric grid """
    if method.lower() == 'pcgpa':
        symmetrizeGrid = True

    """ Adjust grid size if required """
    if symmetrizeGrid:
        if gridSize is None:
            gridSize = [len(wavelengths), len(wavelengths)]
        else:
            gridSize[1] = gridSize[0]

    if dt is None:
        dt = np.mean(np.diff(delays))

    if gridSize is not None:
        symTrace = np.zeros((gridSize[0], len(wavelengths)))
        delayLim = np.min([abs(delays[0]), abs(delays[-1])])
        # Symmetrize delay grid wrt frequency grid
        symDelays = np.linspace(-delayLim, delayLim, gridSize[0])
        for ii, wavelength in enumerate(wavelengths):
            interpTrace = interp(delays, trace[:, ii], 'quadratic',
                                 bounds_error=False, fill_value=0)
            symTrace[:, ii] = interpTrace(symDelays)
            symTrace[:, ii] = (
                0.5 * interpTrace(np.hstack((symDelays[symDelays <= 0],
                                             symDelays[symDelays < 0][-1::-1])))
                + 0.5 * interpTrace(np.hstack((symDelays[symDelays >= 0][-1::-1],
                                               symDelays[symDelays > 0]))))
        trace = symTrace
        delays = symDelays
        dt = np.mean(np.diff(delays))

    """ Define time/frequency grids for input pulse """
    if gridSize is not None:
        ft = pypret.FourierTransform(gridSize[1], dt=dt)
    else:
        ft = pypret.FourierTransform(len(delays), dt=dt)

    """ Integrate over delay axis """
    marginal_w = simpson(trace, delays, axis=0)

    """ Carrier wavelength """
    lambda_0 = C / (simpson(C / wavelengths[-1::-1] * marginal_w[-1::-1],
                            C / wavelengths[-1::-1], axis=0)
                    / simpson(marginal_w[-1::-1],
                              C / wavelengths[-1::-1], axis=0)) * 2

    """ Marginal correction: compare frequency marginal to spectrum
    autoconvolution. Relative differences between the two should correspond
    to experimental bandwidth limitation. Trace is adjusted accordingly to
    offset this effect. """
    if marginalCorrection is not None:
        data = np.load(marginalCorrection)
        corrWavelengths = data['wavelengths'] * 1e-9
        corrSpectrum = data['spectrum']
        corrSpectrumRaw = np.copy(corrSpectrum)
        corrW = np.linspace(-4 * np.pi * C / corrWavelengths[0],
                            4 * np.pi * C / corrWavelengths[0],
                            4 * len(corrWavelengths) + 1)
        corrSpectrum = interp(2 * np.pi * C / corrWavelengths[-1::-1],
                              corrSpectrum[-1::-1],
                              bounds_error=False, fill_value=0)(corrW)
        x, y = fq.ezifft(corrW, corrSpectrum)
        absc_conv, autoConv = fq.ezfft(x, y**2, neg=True)
        autoConv = np.real(autoConv)
        autoConv = interp(absc_conv, autoConv, bounds_error=False,
                          fill_value=0)(2 * np.pi * C / wavelengths)
        marginal_w_corr = np.copy(marginal_w)
        marginal_w_corr[marginal_w_corr <= 0] = \
            marginal_w_corr[marginal_w_corr > 0].min()
        marginal_w_corr = fq.ezsmooth(marginal_w_corr, 15, 'hanning')
        marginalCorr = ((autoConv / autoConv.max())
                        / (marginal_w_corr / marginal_w_corr.max()))
        for ii, delay in enumerate(delays):
            trace[ii, :] *= marginalCorr

        plt.figure()
        plt.plot(wavelengths * 1e9, autoConv / autoConv.max(),
                 label='From spectrum')
        plt.plot(wavelengths * 1e9, marginal_w_corr / marginal_w_corr.max(),
                 label='From FROG trace')
        plt.xlabel('Wavelengths [nm]')
        plt.ylabel('Frequency marginal')
        plt.ylim([0, 1.05])
        plt.legend()

        plt.figure()
        plt.plot(wavelengths * 1e9, marginalCorr)
        plt.xlabel('Wavelengths [nm]')
        plt.ylabel('Marginal correction factor')
        plt.ylim(bottom=0)

    """ Instantiate a pulse object w/ appropriate carrier wavelength (other
    parameters don't matter here) """
    pulseP = pypret.Pulse(ft, lambda_0)
    pypret.random_gaussian(pulseP, 1e-15, phase_max=0.0)

    """ Instantiate a PNPS object for SHG-FROG technique """
    pnps = pypret.PNPS(pulseP, "frog", "shg")
    pnps.calculate(pulseP.spectrum, delays)

    """ Export SHG frequency grid """
    w_shg = pnps.process_w
    w_fund = pulseP.w + pulseP.w0

    """ Interpolate trace to shg frequency grid """
    trace_w = np.zeros((len(delays), len(w_shg)))
    for ii, delay in enumerate(delays):
        interpTrace = interp(C / wavelengths[-1::-1], trace[ii, :][-1::-1],
                             'quadratic', bounds_error=False, fill_value=0)
        trace_w[ii, :] = interpTrace(w_shg / 2 / np.pi)

    """ Plot interpolated trace (to check interpolation errors) """
    plt.figure()
    plt.pcolormesh(delays * 1e15, (2 * np.pi * C / w_shg) * 1e9,
                   trace_w.transpose())
    if marginalCorrection is None:
        plt.title('Input trace (interpolated)')
    else:
        plt.title('Input trace (corrected + interpolated)')
    plt.xlabel('Delay [fs]')
    plt.ylabel('Wavelengths [nm]')
    plt.ylim(wavelengths[0] * 1e9, wavelengths[-1] * 1e9)
    plt.colorbar()

    """ Reformat trace for retriever """
    traceInput = pypret.mesh_data.MeshData(trace_w, delays, w_shg,
                                           labels=['Delay', 'Frequency', ''])

    """ Initial guess for iterative algorithm. Three options:
    Gaussian (default): Fits a gaussian pulse to both the time & freq.
        marginals. Struggles w/ non-bell-shaped spectra.
    Spectrum: Takes the independently measured spectrum as initial guess
        with flat phase.
    RANA: Uses "RANA" algorithm to deduce the spectrum from the trace.
        Uses flat phase. Struggles w/ noise. """
    if initialGuess.lower() == 'gaussian':
        if tau is None:
            autocorr = simpson(trace, wavelengths, axis=1)
            tau = (library_frog.get_FWHM(delays, autocorr)
                   / np.sqrt(2) / np.sqrt(2 * np.log(2)))
            dw = (library_frog.get_FWHM(2 * np.pi * C / wavelengths[-1::-1],
                                        marginal_w[-1::-1])
                  / np.sqrt(2) / np.sqrt(2 * np.log(2)))
            tau_0 = 2 / dw
            if tau > tau_0:
                GDD = (tau**2 * tau_0**2 - tau_0**4)**0.5 / 2
            else:
                GDD = 0
        else:
            dw = 2 / (tau / np.sqrt(2 * np.log(2)))
            # Assume an unchirped initial guess when tau is user-supplied
            # (GDD was otherwise left undefined on this branch).
            GDD = 0
        w_0 = 2 * np.pi * C / lambda_0
        initialGuess = (np.complex128(np.exp(-(w_fund - w_0)**2 / dw**2))
                        * np.exp(1j * GDD * (w_fund - w_0)**2))
    elif initialGuess.lower() == 'spectrum' and marginalCorrection is not None:
        initialGuess = interp(2 * np.pi * C / corrWavelengths[-1::-1],
                              corrSpectrumRaw[-1::-1],
                              bounds_error=False, fill_value=0)(w_fund)
        initialGuess[initialGuess < 0] = 0
        initialGuess = np.complex128(initialGuess**0.5)
        initialGuess /= initialGuess.max()
    else:
        initialGuess = library_frog.RANA(delays, w_shg, trace_w, w_fund)

    """ Instantiate retriever """
    ret = pypret.Retriever(pnps, method=method, verbose=True, maxiter=maxIter)

    """ Apply retrieval algorithm and print results """
    ret.retrieve(traceInput, initialGuess)
    results = ret.result()

    """ Export retrieved pulse & trace """
    pulseRetrieved = results.pulse_retrieved
    traceRetrieved = results.trace_retrieved
    pulseFrequencies = w_fund / (2 * np.pi)
    traceFrequencies = w_shg / (2 * np.pi)

    """ Make plots """
    axSpectrum = library_frog.plot_output(pulseRetrieved, initialGuess,
                                          pulseFrequencies, traceRetrieved,
                                          traceFrequencies, delays, wavelengths)
    if marginalCorrection is not None:
        axSpectrum.plot(corrWavelengths * 1e9,
                        corrSpectrumRaw / corrSpectrumRaw.max(),
                        'g--', linewidth=3, label='Measured')
        axSpectrum.set_ylim([0, 1.05])
        axSpectrum.legend()

    return (pulseRetrieved, initialGuess, pulseFrequencies, traceRetrieved,
            traceFrequencies, delays, wavelengths)
# create grid
tt = np.linspace(TMIN, TMAX, N_GRID)
T0, T1 = np.meshgrid(tt, tt)
theta_grid = np.vstack([T0.ravel(), T1.ravel()]).T

# get normalized prior densities on grid
pdf_prior = compute_prior_density(theta_grid)

# evaluate prior predictive
no_noise = theta_grid[:, 0] + theta_grid[:, 1] * future_d[0][0]
pred = list()
for yt in tqdm(y_T_grid):
    diff = yt - no_noise
    kde = density(diff)
    joint = kde * pdf_prior
    pdf = integrate.simpson(
        integrate.simpson(joint.reshape(len(tt), len(tt)), tt, axis=1), tt)
    pred.append(pdf)
pred = np.array(pred)

# normalize
Z = integrate.simpson(pred, y_T_grid)
pred = pred / Z

# ----- POSTERIOR RUNS ----- #

# important numbers
TMIN, TMAX = -10, 10  # <------- TODO: TOO SMALL?!

# create grid
tt_post = np.linspace(TMIN, TMAX, N_GRID)
T0, T1 = np.meshgrid(tt_post, tt_post)