def _calls_helper(y):
    xl = 1
    xh = 5

    # an invalid ypp_specs value must raise a ValueError
    try:
        fcSpline.FCS(xl, xh, y, ypp_specs=4)
    except ValueError as e:
        print("OK: caught ValueError", e)
    else:
        assert False

    for ypp_specs in [None, 1, 2, 3, (10, 20)]:
        for pp in [True, False]:
            spl = fcSpline.FCS(xl, xh, y, ypp_specs=ypp_specs, use_pure_python=pp)
            # scalar evaluation at the boundaries, in the interior,
            # and slightly outside the interval
            spl(xl)
            spl(xh)
            spl((xl + xh) / 2)
            spl(xl - 0.001)
            spl(xh + 0.001)
            # array evaluation, also slightly beyond the boundaries
            xfine = np.linspace(xl, xh, 3 * (len(y) - 1) + 1)
            spl(xfine)
            xfine = np.linspace(xl - 0.001, xh, 5)
            spl(xfine)
            xfine = np.linspace(xl, xh + 0.001, 5)
            spl(xfine)
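# A minimal driver for the helper above, assuming the suite exercises it with
# both real- and complex-valued data (the concrete test functions are not part
# of this excerpt, so this is a sketch).
def test_calls():
    n = 15
    y = np.sin(np.linspace(1, 5, n))            # real-valued data
    _calls_helper(y)
    y = y + 1j * np.cos(np.linspace(1, 5, n))   # complex-valued data
    _calls_helper(y)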
def test_cubic_fnc():
    xl = 0
    xh = 4
    n = 5
    x = np.linspace(xl, xh, n)
    y = [0, 1, 8, 27, 64]  # f(x) = x**3 sampled on x
    x_fine = np.linspace(xl, xh, 750)

    for use_pure_python in [True, False]:
        # use the analytic endpoint curvature: f''(x) = 6x, so
        # f''(xl=0) = 0 and f''(xh=4) = 24, hence ypp_specs=(0, 24)
        spl = fcSpline.FCS(xl, xh, y, ypp_specs=(0, 24), use_pure_python=use_pure_python)
        y_spl = spl(x_fine)
        d = np.abs(x_fine**3 - y_spl)
        assert np.max(d) < 1e-14, "{}".format(np.max(d))

        # use the third order finite difference approximation for the
        # endpoint curvature, which reproduces the cubic exactly
        # (hence the same tight tolerance)
        spl = fcSpline.FCS(xl, xh, y, ypp_specs=1, use_pure_python=use_pure_python)
        y_spl = spl(x_fine)
        d = np.abs(x_fine**3 - y_spl)
        assert np.max(d) < 1e-14, "{}".format(np.max(d))
def new_process(self, y=None, seed=None):
    r"""Generate a new realization of the stochastic process.

    If ``seed`` is not ``None``, seed the random number generator with
    ``seed`` before drawing new random numbers.

    If ``y`` is ``None``, draw new random numbers to generate the new
    realization. Otherwise use ``y`` as input to generate the new
    realization.
    """
    t0 = time.time()

    # clean up old data
    del self._interpolator
    del self._z

    self._proc_cnt += 1

    if seed is not None:
        log.info("use fixed seed ({}) for new process".format(seed))
        np.random.seed(seed)

    if y is None:
        # draw standard complex normal samples
        y = np.random.normal(scale=self._one_over_sqrt_2,
                             size=2 * self.get_num_y()).view(np.complex128)
    elif len(y) != self.get_num_y():
        raise RuntimeError("the length of 'y' ({}) needs to be {}".format(
            len(y), self.get_num_y()))

    self._z = self._calc_scaled_z(y)
    log.debug("proc_cnt:{} new process generated [{:.2e}s]".format(
        self._proc_cnt, time.time() - t0))

    t0 = time.time()
    self._interpolator = fcSpline.FCS(x_low=0, x_high=self.t_max, y=self._z)
    log.debug("created interpolator [{:.2e}s]".format(time.time() - t0))
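# Hedged usage sketch: assuming the enclosing class is callable at a time t
# once a realization exists (as in the stocproc interface), fixing the seed
# makes realizations reproducible.  The constructor below is hypothetical and
# not part of this excerpt.
#
#     sp = SomeStocProc(...)      # hypothetical setup
#     sp.new_process(seed=42)
#     z1 = sp(1.0)
#     sp.new_process(seed=42)     # same seed -> same realization
#     z2 = sp(1.0)
#     assert z1 == z2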
def compare_speed():
    from scipy.interpolate import InterpolatedUnivariateSpline
    from time import time

    for n in [15, 150, 1500, 15000]:
        xl = -10
        xh = 10
        f = lambda x: np.sin(x)
        x = np.linspace(xl, xh, n)
        y = f(x)

        # compare the setup time of both splines
        t0 = time()
        spl = fcSpline.FCS(xl, xh, y)
        t1 = time()
        spl_scip = InterpolatedUnivariateSpline(x, y, k=3)
        t2 = time()
        print("n", n)
        print("INIT - fcs: {:.3e}s, sci {:.3e}s factor {:.3g}".format(
            t1 - t0, t2 - t1, (t2 - t1) / (t1 - t0)))

        # compare the average scalar evaluation time at random points
        t_fcs = t_sci = 0
        N = 10000
        for i in range(N):
            x = np.random.rand() * (xh - xl) + xl
            t0 = time()
            spl(x)
            t1 = time()
            spl_scip(x)
            t2 = time()
            t_fcs += t1 - t0
            t_sci += t2 - t1
        print("EVAL - fcs: {:.3e}s, sci {:.3e}s factor {:.3g}".format(
            t_fcs / N, t_sci / N, t_sci / t_fcs))
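# The loop above times individual scalar calls, where Python call overhead
# dominates.  As a complementary sketch (not part of the original benchmark),
# array evaluation amortizes that overhead; both splines accept array
# arguments, as the tests in this module show.
def compare_speed_vectorized(n=1500, n_eval=10000):
    from scipy.interpolate import InterpolatedUnivariateSpline
    from time import time
    xl, xh = -10, 10
    x = np.linspace(xl, xh, n)
    y = np.sin(x)
    spl = fcSpline.FCS(xl, xh, y)
    spl_scip = InterpolatedUnivariateSpline(x, y, k=3)
    # evaluate both splines once on the same array of random points
    x_eval = np.random.rand(n_eval) * (xh - xl) + xl
    t0 = time()
    spl(x_eval)
    t1 = time()
    spl_scip(x_eval)
    t2 = time()
    print("EVAL (array of {}) - fcs: {:.3e}s, sci {:.3e}s".format(
        n_eval, t1 - t0, t2 - t1))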
def test_spline_property():
    xl = 0
    xh = 10
    n = 15
    x = np.linspace(xl, xh, n)
    y = np.sin(x)
    spl = fcSpline.FCS(xl, xh, y, use_pure_python=True)

    # the spline must reproduce the data points exactly
    for i, xi in enumerate(x):
        d = abs(spl(xi) - y[i])
        assert d < 1e-14, "d={} < 1e-14 failed".format(d)

    # check the continuity of the second derivative: the third derivative of
    # a cubic spline is piecewise constant, so the jumps of the numerically
    # estimated second derivative shrink by the factor fac when the
    # evaluation grid is fac times finer
    for fac in [3, 5, 7]:
        xf, dx = np.linspace(xl, xh, 500, retstep=True)
        yf = spl(xf)
        y_pp = second_deriv(yf, dx)
        d = np.abs(y_pp[1:] - y_pp[:-1])
        d1 = np.max(d)

        xf, dx = np.linspace(xl, xh, fac * 500, retstep=True)
        yf = spl(xf)
        y_pp = second_deriv(yf, dx)
        d = np.abs(y_pp[1:] - y_pp[:-1])
        d2 = np.max(d)
        assert abs(fac - d1 / d2) < 0.02

    # check convergence for a complex function: the tolerances mrd tighten
    # rapidly with the number of grid points, reflecting the fast (roughly
    # fourth order in the spacing) convergence of cubic spline interpolation
    xl = 0
    xh = 10
    n = 2**(np.asarray([6, 8, 10, 12]))
    mrd = [5e-5, 5e-8, 6e-11, 3e-13]
    for i, ni in enumerate(n):
        x = np.linspace(xl, xh, ni)
        f = lambda x: np.sin(x) + 1j * np.exp(-(x - 5)**2 / 10)
        y = f(x)
        spl = fcSpline.FCS(xl, xh, y, ypp_specs=3)
        xf = np.linspace(xl, xh, 4 * (ni - 1) + 1)
        yf = spl(xf)
        rd = np.abs(f(xf) - yf) / np.abs(f(xf))
        assert np.max(rd) < mrd[i]
def test_few_points():
    xl = 0
    xh = 1
    n = 5
    x = np.linspace(xl, xh, n)
    y = [1, 2, 1, 2, 1]
    spl = fcSpline.FCS(xl, xh, y, use_pure_python=True)
    for i in range(len(y)):
        assert spl(x[i]) == y[i]
def ex1():
    n = 15
    xl = -10
    xh = 10
    f = lambda x: np.sin(x)
    x = np.linspace(xl, xh, n)
    y = f(x)
    spl = fcSpline.FCS(xl, xh, y)

    xfine, dxfine = np.linspace(xl, xh, 500, retstep=True)
    yfine = spl(xfine)

    plt.plot(x, y, ls='', marker='.', label='data set y=sin(x)')
    plt.plot(xfine, yfine, label='interpol.')
    y_pp = np.gradient(np.gradient(yfine, dxfine), dxfine)
    plt.plot(xfine, y_pp, label='2nd derv.')
    plt.grid()
    plt.legend()
    plt.show()
def get_dt_for_accurate_interpolation(t_max, tol, ft_ref, diff_method=_absDiff):
    N = 32
    sub_sampl = 2

    while True:
        tau = np.linspace(0, t_max, N + 1)
        ft_ref_n = ft_ref(tau)

        # build the spline from every sub_sampl-th sample and evaluate it
        # on the full grid
        ft_intp = fcSpline.FCS(x_low=0, x_high=t_max, y=ft_ref_n[::sub_sampl])
        ft_intp_n = ft_intp(tau)

        # normalize both to the reference value at tau = 0
        ft_ref_0 = abs(ft_ref(0))
        ft_ref_n /= ft_ref_0
        ft_intp_n /= ft_ref_0

        d = diff_method(ft_intp_n, ft_ref_n)
        log.info("acc interp N {} dt {:.2e} -> diff {:.2e}".format(
            N + 1, sub_sampl * tau[1], d))
        if d < tol:
            return t_max / (N / sub_sampl)
        N *= 2
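# Hedged usage sketch: for an exponentially decaying reference function the
# routine doubles the number of samples until the spline built from every
# second point matches the full reference within tol, then returns the
# corresponding step size.  _absDiff is assumed to be the module's default
# maximum-absolute-difference helper.
#
#     dt = get_dt_for_accurate_interpolation(
#         t_max=10, tol=1e-4, ft_ref=lambda t: np.exp(-np.abs(t)))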
def auto_ng(corr, t_max, ngfac=2, meth=get_mid_point_weights_times,
            tol=1e-3, diff_method='full', dm_random_samples=10**4,
            ret_eigvals=False, relative_difference=False):
    r"""Increase the number of grid points until the desired accuracy is met.

    This function increases the number of grid points of the discrete Fredholm
    equation exponentially until a given accuracy is met. The accuracy is
    determined from the deviation of the approximated auto correlation of the
    Karhunen-Loève expansion from the given reference auto correlation.

    .. math::

        \Delta(n) = \max_{t,s \in [0,t_\mathrm{max}]}\left( \Big | \alpha(t-s) - \sum_{i=1}^n \lambda_i u_i(t) u_i^\ast(s) \Big | \right )

    :param corr: the auto correlation function
    :param t_max: specifies the interval [0, t_max] for which the stochastic process can be evaluated
    :param ngfac: specifies the fine grid to use for the spline interpolation, the intermediate points are
        calculated using integral interpolation
    :param meth: the method for calculating integration weights and times, a callable or one of the
        following strings 'midpoint' ('midp'), 'trapezoidal' ('trapz'), 'simpson' ('simp'),
        'fourpoint' ('fp'), 'gauss_legendre' ('gl'), 'tanh_sinh' ('ts')
    :param tol: defines the success criterion max(abs(corr_exact - corr_reconstr)) < tol
    :param diff_method: either 'full' or 'random', determines the points where the above success criterion
        is evaluated; 'full': full grid in between the fine grid, such that the spline interpolation error
        is expected to be maximal; 'random': pick a fixed number of random times t and s within the
        interval [0, t_max]
    :param dm_random_samples: the number of random times used for diff_method 'random'
    :param ret_eigvals: if True, return the eigenvalues as well
    :param relative_difference: if True, use the relative difference instead of the absolute one
    :return: an array containing the necessary eigenfunctions of the Karhunen-Loève expansion for
        sampling the stochastic processes (shape=(num_eigen_functions, num_grid_points)), together
        with the fine time grid (and the eigenvalues if ``ret_eigvals`` is True)

    The procedure works as follows:

    1) Solve the discrete Fredholm equation on a grid with ng points. This gives ng
       eigenvalues/vectors where each ng-dimensional vector approximates the continuous
       eigenfunction (:math:`t, u_i(t) \leftrightarrow t_k, u_{ik}` where the :math:`t_k`
       depend on the integration weights method). For performance reasons, especially when
       the auto correlation function evaluates slowly, it is advisable to use a method with
       equally distributed times :math:`t_k`.

    2) Approximate the eigenfunction on a finer, equidistant grid
       (:math:`ng_\mathrm{fine} = ng_\mathrm{fac}(ng-1)+1`) using

       .. math::

           u_i(t) = \frac{1}{\lambda_i} \int_0^{t_\mathrm{max}} \mathrm{d}s \; \alpha(t-s) u_i(s)
           \approx \frac{1}{\lambda_i} \sum_k w_k \alpha(t-s_k) u_{ik}

       According to Numerical Recipes [1] this interpolation should perform better than simple
       spline interpolation. However, it turns out that this is not the case in general (e.g.
       for exponential auto correlation functions the spline interpolation performs better).
       For that reason it might be useful to set ngfac to 1, which skips the integral
       interpolation.

    3) Use the eigenfunction on the fine grid to set up a cubic spline interpolation.

    4) Use the spline interpolation to estimate the deviation :math:`\Delta(n)`. When using
       diff_method = 'full' the maximization is performed over all :math:`t'_i, s'_j` where
       :math:`t'_i = (t_i + t_{i+1})/2` and :math:`s'_i = (s_i + s_{i+1})/2` with
       :math:`i,j = 0, \, ...\, , ng_\mathrm{fine}-2`.
       It is expected that the interpolation error is maximal in between the reference points.

    5) Now calculate the deviation :math:`\Delta(n)` for sequential n starting at n=0. Stop if
       :math:`\Delta(n) < tol`. If the deviation does not drop below tol for all
       :math:`0 \leq n < ng-1`, increase ng as follows :math:`ng = 2 ng - 1` and start over at
       1). (This update scheme for ng ensures that ng is odd, which is needed for the 'simpson'
       and 'fourpoint' integration weights.)

    .. note::
       The scaling of the error of the various integration methods does not correspond to the
       scaling of the number of eigenfunctions needed to reconstruct the auto correlation
       function within a given tolerance. Surprisingly, it turns out that in general the most
       trivial **mid-point method** performs quite well. Whether another method suits better
       needs to be checked in each individual case.

    [1] Press, W.H., Teukolsky, S.A., Vetterling, W.T., Flannery, B.P., 2007. Numerical
    Recipes 3rd Edition: The Art of Scientific Computing, 3rd ed. Cambridge University Press,
    Cambridge, UK; New York. (pp. 990)
    """
    time_start = time.time()
    if diff_method == 'full':
        pass
    elif diff_method == 'random':
        t_rand = np.random.rand(dm_random_samples) * t_max
        s_rand = np.random.rand(dm_random_samples) * t_max
        alpha_ref = corr(t_rand - s_rand)
    else:
        raise ValueError("unknown diff_method '{}', use 'full' or 'random'".format(diff_method))
    alpha_0 = np.abs(corr(0))
    log.debug("diff_method: {}".format(diff_method))

    time_fredholm = 0
    time_calc_ac = 0
    time_integr_intp = 0
    time_spline = 0
    time_calc_diff = 0

    if isinstance(meth, str):
        meth = str_meth_to_meth(meth)

    k = 4
    while True:
        k += 1
        ng = 2**k + 1
        log.info("check {} grid points".format(ng))
        t, w = meth(t_max, ng)
        is_equi = is_axis_equidistant(t)
        log.debug("equidistant axis: {}".format(is_equi))

        # efficient way to construct the auto correlation matrix r
        t0 = time.time()
        r = _calc_corr_matrix(t, corr, is_equi)
        time_calc_ac += time.time() - t0

        # solve the discrete Fredholm equation using integration weights w
        t0 = time.time()
        _eig_val, _eig_vec = solve_hom_fredholm(r, w)
        time_fredholm += time.time() - t0

        tfine = subdevide_axis(t, ngfac)    # setup fine
        tsfine = subdevide_axis(tfine, 2)   # and super fine time grid

        if is_equi:
            # efficient way to calculate the auto correlation from -tmax
            # until tmax on the fine grid, needed for integral interpolation
            t0 = time.time()
            alpha_k = _calc_corr_min_t_plus_t(tfine, corr)
            time_calc_ac += time.time() - t0
            alpha_k_is_real = np.isrealobj(alpha_k)
            if alpha_k_is_real:
                print("alpha_k is real")

        if diff_method == 'full':
            if not is_equi:
                alpha_ref = corr(tsfine.reshape(-1, 1) - tsfine.reshape(1, -1))
            else:
                alpha_ref = _calc_corr_matrix(tsfine, corr, is_equi=True)

        diff = -alpha_ref
        if relative_difference:
            abs_alpha_res = np.abs(alpha_ref)
        else:
            abs_alpha_res = 1

        sqrt_lambda_ui_fine_all = []
        for i in range(ng):
            evec = _eig_vec[:, i]
            if _eig_val[i] < 0:
                print(ng, i)
                break
            sqrt_eval = np.sqrt(_eig_val[i])
            if ngfac != 1:
                t0 = time.time()
                # when using sqrt_lambda instead of lambda we get sqrt_lambda times u,
                # which is the quantity needed for the stochastic process generation
                if not is_equi:
                    sqrt_lambda_ui_fine = np.asarray(
                        [np.sum(corr(ti - t) * w * evec) / sqrt_eval for ti in tfine])
                else:
                    sqrt_lambda_ui_fine = stocproc_c.eig_func_interp(
                        delta_t_fac=ngfac,
                        time_axis=t,
                        alpha_k=np.asarray(alpha_k, dtype=np.complex128),
                        weights=w,
                        eigen_val=sqrt_eval,
                        eigen_vec=evec)
                time_integr_intp += time.time() - t0
            else:
                sqrt_lambda_ui_fine = evec * sqrt_eval
            sqrt_lambda_ui_fine_all.append(sqrt_lambda_ui_fine)

            # set up the cubic spline interpolator
            t0 = time.time()
            if not is_equi:
                sqrt_lambda_ui_spl = tools.ComplexInterpolatedUnivariateSpline(
                    tfine, sqrt_lambda_ui_fine, noWarning=True)
            else:
                sqrt_lambda_ui_spl = fcSpline.FCS(x_low=0, x_high=t_max,
                                                  y=sqrt_lambda_ui_fine,
                                                  ord_bound_apprx=2)
            time_spline += time.time() - t0

            # calculate the max deviation
            t0 = time.time()
            if diff_method == 'random':
                ui_t = sqrt_lambda_ui_spl(t_rand)
                ui_s = sqrt_lambda_ui_spl(s_rand)
                if alpha_k_is_real:
                    diff += np.real(ui_t * np.conj(ui_s))
                else:
                    diff += ui_t * np.conj(ui_s)
            elif diff_method == 'full':
                ui_super_fine = sqrt_lambda_ui_spl(tsfine)
                diff += ui_super_fine.reshape(-1, 1) * np.conj(ui_super_fine.reshape(1, -1))
            md = np.max(np.abs(diff) / abs_alpha_res)
            time_calc_diff += time.time() - t0
            log.debug("num evec {} -> max diff {:.3e}".format(i + 1, md))

            if md < tol:
                time_total = (time_calc_diff + time_spline + time_integr_intp
                              + time_calc_ac + time_fredholm)
                time_overall = time.time() - time_start
                time_rest = time_overall - time_total
                log.info(
                    "calc_ac {:.3%}, fredholm {:.3%}, integr_intp {:.3%}, "
                    "spline {:.3%}, calc_diff {:.3%}, rest {:.3%}".format(
                        time_calc_ac / time_overall, time_fredholm / time_overall,
                        time_integr_intp / time_overall, time_spline / time_overall,
                        time_calc_diff / time_overall, time_rest / time_overall))
                log.info(
                    "auto ng SUCCESSFUL max diff {:.3e} < tol {:.3e} ng {} num evec {}".format(
                        md, tol, ng, i + 1))
                if ret_eigvals:
                    return (np.asarray(sqrt_lambda_ui_fine_all), tfine,
                            _eig_val[:len(sqrt_lambda_ui_fine_all)])
                else:
                    return np.asarray(sqrt_lambda_ui_fine_all), tfine
        log.info("ng {} yields md {:.3e}".format(ng, md))
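# Hedged usage sketch: reconstruct an exponential auto correlation
# alpha(tau) = exp(-|tau|) on [0, 2] to a tolerance of 1e-2.  Without
# ret_eigvals the function returns the scaled eigenfunctions
# sqrt(lambda_i) * u_i on the fine grid together with that grid.
#
#     ui_fine, tfine = auto_ng(lambda tau: np.exp(-np.abs(tau)),
#                              t_max=2, tol=1e-2)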