def _bound_cond_2_3_kind( self, cond_eq: sympy.Equality, coords: cs.CoordinateSystem ):
    """Assemble Fortran code for a 2nd/3rd-kind boundary condition.

    Builds the indexed temperature expression for the boundary node, a
    condition coefficient from ``self.a1``/``self.a2``, and stores the
    Fortran strings in ``self.CON``, ``self.APS`` and ``self.T``.

    :param cond_eq: boundary-condition equality; only its RHS is used.
    :param coords: coordinate system providing axes and index symbols.
    """
    T_indexes = []
    axis_inx, axis_h_inx = None, None
    # Collect indices for T: all non-boundary axes keep their own index;
    # the boundary axis uses node 1/2 on the left side or the string
    # indices "L1"/"L2" on the right side.
    for axis, index in zip(coords.axises(), coords.indexes()):
        if axis != self.axis:
            T_indexes.append(index)
        else:
            # NOTE(review): f"L1"/f"L2" have no placeholders — possibly a
            # leftover from an interpolated expression; confirm intent.
            axis_inx = 1 if self.bound_side == BoundSide.L else f"L1"
            axis_h_inx = 2 if self.bound_side == BoundSide.L else f"L2"
            T_indexes.append(axis_h_inx)
    # Sign of the outward direction: +1 on the right boundary, -1 on the left.
    integral_coef = 1 if self.bound_side == BoundSide.R else -1
    X = sympy.IndexedBase("X")
    T = sympy.IndexedBase("T")[T_indexes]
    # Normalisation by (a2 - a1 * dx) where dx is the boundary cell width.
    cond_coef = 1 / (self.a2 - self.a1 * (X[axis_h_inx] - X[axis_inx]))
    aps = -self.a1 * cond_coef * integral_coef
    con = calc_in_point(cond_eq.rhs, coords) * cond_coef * integral_coef
    if -self.a1 * integral_coef > 0:
        # Move the positive source term into CON and zero APS
        # (mirrors the Sp/Sc linearisation rule used elsewhere).
        # Order matters: `con` must absorb `aps * T` before `aps` is zeroed.
        con += aps * T
        aps = sympy.simplify(0)
    self.CON = _to_fcode(con)
    self.APS = _to_fcode(aps)
    self.T = _to_fcode(T - (aps * T + con) * (X[axis_h_inx] - X[axis_inx]))
def expr(self):
    """Build the symbolic decomposition terms and their combined effect.

    Constructs activity (``A[t]``), structure (``A[t,i]/A[t]``) and
    intensity (``E[t,i]/A[t,i]``) terms weighted by ``self.weights``,
    and combines them according to ``self.model``.

    Returns:
        dict mapping ``str(term)`` of each weighted term to the term,
        plus the key ``'effect'`` for the combined expression.

    Raises:
        ValueError: if ``self.model`` is neither ``'multiplicative'``
            nor ``'additive'`` (previously this fell through and raised
            an opaque NameError on the undefined ``effect``).
    """
    E = sp.IndexedBase('E')
    A = sp.IndexedBase('A')
    i, t = sp.symbols('i t', cls=sp.Idx)
    t1 = A[t]
    t2 = A[t, i] / A[t]
    t3 = E[t, i] / A[t, i]
    lhs = E
    m = 4
    weights = self.weights(lhs, t, i, m)
    activity = t1 * weights
    structure = t2 * weights
    intensity = t3 * weights
    terms = [activity, structure, intensity]
    if self.model == 'multiplicative':
        effect = activity * structure * intensity
    elif self.model == 'additive':
        effect = activity + structure + intensity
    else:
        raise ValueError(f"unknown model: {self.model!r}")
    # Use a distinct loop name so the sympy Idx `t` above is not shadowed.
    results = {str(term): term for term in terms}
    results['effect'] = effect
    for k in results.keys():
        print('k:', k)
        sp.pprint(results[k])
    return results
def _equation_processing(self, sympy_problem: problem.ProblemSympy):
    """Match the PDE against the canonical 1-D diffusion form and extract
    the Fortran source-term coefficients.

    Matches ``rc * dU/dt = kx * d2U/dx2 + Sp * U + Sc`` and stores:
    ``self.GamX`` (diffusion coefficient), ``self.Rho`` (capacity),
    ``self.Sp``/``self.Sc`` (linearised source terms as Fortran code,
    cell-integrated over space and time-averaged over one step DT).
    """
    equation = sympy_problem.equation
    coords = sympy_problem.coordinate_system
    # Wild symbols so .match() can extract the PDE coefficients.
    rc, kx, Sp_, Sc_ = sympy.symbols("rc,kx,Sp,Sc", cls=sympy.Wild)
    U_pattern = sympy.WildFunction("U_pattern", nargs=len(coords.axises()))
    x_var = sympy.Symbol("x")
    # `t` here is the module-level time symbol (defined elsewhere in this file).
    res = equation.match(
        sympy.Eq(
            rc * sympy.Derivative(U_pattern, t),
            kx * sympy.Derivative(U_pattern, x_var, x_var)
            + Sp_ * U_pattern
            + Sc_,
        )
    )
    self.GamX = res[kx]
    self.Rho = res[rc]
    xu_i = sympy.Symbol("I")
    XU = sympy.IndexedBase("XU")
    DT = sympy.Symbol("DT")
    T = sympy.IndexedBase("T")[xu_i]
    # Integrate the source terms over one control volume [XU(I), XU(I+1)]
    # and average over one time step [TIME, TIME+DT].
    Sc = sympy.integrate(res[Sc_], (x_var, XU[xu_i], XU[xu_i + 1]))
    Sc = sympy.integrate(Sc, (t, "TIME", "TIME+DT")) / DT
    Sp = sympy.integrate(res[Sp_], (x_var, XU[xu_i], XU[xu_i + 1]))
    Sp = sympy.integrate(Sp, (t, "TIME", "TIME+DT")) / DT
    if res[Sp_].is_constant() and res[Sp_] > 0:
        # Standard linearisation rule: a positive constant Sp would
        # destabilise the solver, so fold it into Sc and zero Sp.
        Sc += Sp * T
        Sp = sympy.simplify(0)
    self.Sp = _to_fcode(sympy.simplify(calc_in_point(Sp, coords, use_U=True)))
    self.Sc = _to_fcode(sympy.simplify(calc_in_point(Sc, coords, use_U=True)))
def manual_g_computation(u_data, y_data, Eu_vals, tau):
    """Estimate the weighting-sequence value ĝ(tau) via the cross-correlation
    sum (1/N) Σ y[t]·u[t-tau], normalised by the input energy Eu_vals.

    Logs the symbolic form, a semi-symbolic form (y-values as symbols),
    and the numeric value; returns the numeric estimate.
    """
    y = sy.IndexedBase('y')
    u = sy.IndexedBase('u')
    N = len(u_data)
    # `t` is the module-level summation symbol.
    cross = sy.Sum(y[t] * u[t - tau], (t, 1, N)) / N
    logging.info(f'ĝ({tau}) = {cross}')

    def substitute(output_values):
        # Expand the sum, plug in the measured inputs, then the outputs
        # (1-based indexing), and normalise by the input energy.
        expanded = cross.doit()
        expanded = expanded.subs([(u[idx + 1], val) for idx, val in enumerate(u_data)])
        expanded = expanded.subs([(y[idx + 1], val) for idx, val in enumerate(output_values)])
        return expanded / Eu_vals

    g_sym = substitute(sy.symbols(str(v)) for v in y_data)
    logging.info(f'ĝ({tau}) = {g_sym}')
    g_val = substitute(y_data)
    logging.info(f'ĝ({tau}) = {g_val}')
    if g_val.args:
        g_val = g_val.args[0]
    return g_val
def integral(self) -> Dict[ChannelID, ExpressionScalar]:
    """Accumulate per-channel symbolic integrals over consecutive entry pairs.

    For each pair of adjacent table entries the interpolation's integral
    expression is evaluated with the pair's times (t0, t1) and the
    channel-specific broadcast values (v0, v1), then summed per channel.
    """
    n_channels = len(self.defined_channels)
    totals = {ch: 0 for ch in self._channels}
    # zip stops at the shorter sequence, so this pairs entry k with k+1.
    for start, end in zip(self._entries, self._entries[1:]):
        time_subs = {
            't0': start.t.sympified_expression,
            't1': end.t.sympified_expression,
        }
        v0 = sympy.IndexedBase(
            Broadcast(start.v.underlying_expression, (n_channels,)))
        v1 = sympy.IndexedBase(
            Broadcast(end.v.underlying_expression, (n_channels,)))
        segment = start.interp.integral.sympified_expression
        for idx, ch in enumerate(self._channels):
            totals[ch] += segment.subs(
                {**time_subs, 'v0': v0[idx], 'v1': v1[idx]})
    return {ch: ExpressionScalar(val) for ch, val in totals.items()}
def kk_ver1(Pos, SpCons, Len, eps=0.001):
    'Partial optimisation using a list of per-node derivative functions.'
    t_start = time()
    nodes, dim = Pos.shape
    const = (SpCons, Len)
    P = sp.IndexedBase('P')
    K = sp.IndexedBase('K')
    L = sp.IndexedBase('L')
    i, j, d = [ sp.Idx(*spec) for spec in [('i', nodes), ('j', nodes), ('d', dim)] ]
    i_range, j_range, d_range = [(idx, idx.lower, idx.upper) for idx in [i, j, d]]
    # Prepare the potential function (Kamada-Kawai spring energy).
    dist = sp.sqrt(sp.Sum((P[i, d] - P[j, d])**2, d_range))
    Potential = 1 / 2 * K[i, j] * (dist - L[i, j])**2
    E = sp.Sum(Potential, i_range, j_range).doit()
    # Build one jacobian/hessian function per node, with the spring
    # constants and natural lengths pre-bound via functools.partial.
    E_jac, E_hess = [], []
    for m in range(nodes):
        variables = [P[m, d] for d in range(dim)]
        mth_jac, mth_hess = [
            partial(sp.lambdify((K, L, P), f, dummify=False), *const)
            for f in [sp.Matrix([E]).jacobian(variables), sp.hessian(E, variables)]
        ]
        E_jac.append(mth_jac)
        E_hess.append(mth_hess)
        print('generating...', int(m / nodes * 100), '%', "\r", end="")
    print('derivative functions are generated:', time() - t_start, 's')
    ## Optimisation: repeatedly pick the node with the largest gradient
    ## norm and relax it with Newton's method until all gradients < eps.
    delta_max = sp.oo
    loops = 0
    while (delta_max > eps):
        max_idx, delta_max = 0, 0
        for m in range(nodes):
            mth_jac = E_jac[m]
            delta = la.norm(mth_jac(Pos))
            if (delta_max < delta):
                delta_max = delta
                max_idx = m
        print(loops, 'th:', max_idx, ' delta=', delta_max)
        loops += 1
        jac = E_jac[max_idx]
        hess = E_hess[max_idx]
        # Newton iteration on the selected node only; mutates Pos in place.
        while (la.norm(jac(Pos)) > eps):
            delta_x = la.solve(hess(Pos), jac(Pos).flatten())
            Pos[max_idx] -= delta_x
    print('Fitting Succeeded')
    print('Finish:', time() - t_start, 's')
    return Pos
def calc_in_point(expr, coords, use_U=False):
    """Rewrite *expr* in terms of grid-point indexed symbols.

    Each spatial axis symbol is replaced by an IndexedBase named after the
    upper-cased axis (with a trailing "U" when *use_U* is set), the time
    symbol `t` becomes the string "TIME", and the wildcard function
    U_pattern becomes the indexed temperature T.
    """
    suffix = "U" if use_U else ""
    axis_subs = {
        axis: sympy.IndexedBase(str(axis).upper() + suffix)[index]
        for axis, index in zip(coords.axises(), coords.indexes())
        if axis != t  # the time axis is handled separately below
    }
    T = sympy.IndexedBase("T")[coords.indexes()]
    U_pattern = sympy.WildFunction("U_pattern", nargs=len(coords.axises()))
    return expr.subs({t: "TIME", U_pattern: T, **axis_subs})
def formulate(
    cls,
    n_channels: int,
    n_poles: int,
    parametrize: bool = True,
    **kwargs: Any,
) -> sp.Matrix:
    r"""Implementation of :eq:`T-hat in terms of K-hat`.

    Args:
        n_channels: Number of coupled channels.
        n_poles: Number of poles.
        parametrize: Set to `False` if don't want to parametrize and
            only get symbols for the matrix multiplication of
            :math:`\boldsymbol{K}` and :math:`\boldsymbol{\rho}`.
        return_t_hat: Set to `True` if you want to get the
            Lorentz-invariant :math:`\boldsymbol{\hat{T}}`-matrix instead
            of the :math:`\boldsymbol{T}`-matrix from
            Eq. :eq:`K-hat and T-hat`.
        phsp_factor: Optional phase-space factor callable; defaults to
            `PhaseSpaceFactor`.
        angular_momentum: Forwarded to the parametrization (default 0).
        meson_radius: Forwarded to the parametrization (default 1).
    """
    return_t_hat: bool = kwargs.pop("return_t_hat", False)
    t_matrix, k_matrix = cls._create_matrices(n_channels, return_t_hat)
    if not parametrize:
        # Caller only wants the symbolic matrices, not the parametrization.
        return t_matrix
    phsp_factor: PhaseSpaceFactorProtocol = kwargs.get(
        "phsp_factor", PhaseSpaceFactor)
    s = sp.Symbol("s")
    m_a = sp.IndexedBase("m_a")
    m_b = sp.IndexedBase("m_b")
    # First replace each K-matrix element with its pole parametrization,
    # then substitute the per-channel phase-space factors rho_i.
    return t_matrix.xreplace({
        k_matrix[i, j]: cls.parametrization(
            i=i,
            j=j,
            s=s,
            pole_position=sp.IndexedBase("m"),
            pole_width=sp.IndexedBase("Gamma"),
            m_a=m_a,
            m_b=m_b,
            residue_constant=sp.IndexedBase("gamma"),
            n_poles=n_poles,
            pole_id=sp.Symbol("R", integer=True, positive=True),
            angular_momentum=kwargs.get("angular_momentum", 0),
            meson_radius=kwargs.get("meson_radius", 1),
            phsp_factor=phsp_factor,
        )
        for i in range(n_channels)
        for j in range(n_channels)
    }).xreplace({
        sp.Symbol(f"rho{i}"): phsp_factor(s, m_a[i], m_b[i])
        for i in range(n_channels)
    })
def exponential_filter_algo():
    """Build the Algorithm for a first-order exponential (IIR) filter:
    y[k] = α·x[k] + (1-α)·y[k-1], followed by the state shift y[k-1] ← y[k].
    """
    x = sympy.IndexedBase('x')
    y = sympy.IndexedBase('y')
    k, alpha = sympy.symbols('k α')
    # Convex combination of the new sample and the previous output.
    filtered = alpha * x[k] + (1 - alpha) * y[k - 1]
    update_step = Assignment(lhs=y[k], rhs=filtered)
    shift_step = Assignment(lhs=y[k - 1], rhs=y[k])
    return Algorithm(
        inputs=x[k],
        states=y[k - 1],
        outputs=y[k],
        parameters=alpha,
        procedure=[update_step, shift_step],
    )
def kk_ver2(Pos, SpCons, Len):
    'Simultaneous optimisation of all 2n position variables at once.'
    t_start = time()
    nodes, dim = Pos.shape
    # NOTE(review): `const` is never used in this version (kk_ver1 binds it
    # via partial); kept as-is.
    const = (SpCons, Len)
    P = sp.IndexedBase('P')
    K = sp.IndexedBase('K')
    L = sp.IndexedBase('L')
    X = sp.IndexedBase('X')
    i, j, d = [ sp.Idx(*spec) for spec in [('i', nodes), ('j', nodes), ('d', dim)] ]
    i_range, j_range, d_range = [(idx, idx.lower, idx.upper) for idx in [i, j, d]]
    # Prepare the potential (spring energy) function.
    print('reserving Potential function')
    dist = sp.sqrt(sp.Sum((P[i, d] - P[j, d])**2, d_range)).doit()
    E = sp.Sum(K[i, j] * (dist - L[i, j])**2, i_range, j_range)
    # Prepare jacobian and hessian.
    # NOTE(review): E_jac/E_hes below are shadowed by the flattened
    # E_jac_X/E_hes_X versions and never used; kept as-is.
    print('reserving jacobian and hessian')
    varP = [P[i, d] for i in range(nodes) for d in range(dim)]
    E_jac = sp.Matrix([E]).jacobian(varP)
    E_hes = sp.hessian(E, varP)
    print('generating derivative equation')
    # Flatten the (nodes, dim) positions into a single vector X so that
    # scipy.optimize.minimize can drive the problem.
    varX = [X[i] for i in range(nodes * dim)]
    PX = np.array([X[i * dim + j] for i in range(nodes)
                   for j in range(dim)]).reshape(nodes, dim)
    E_X = E.replace(K, SpCons).replace(L, Len).replace(P, PX).doit()
    E_jac_X = sp.Matrix([E_X]).jacobian(varX)
    E_hes_X = sp.hessian(E_X, varX)
    print('generating derivative function')
    F, G, H = [sp.lambdify(X, f) for f in [E_X, E_jac_X, E_hes_X]]
    print('fitting')
    # trust-ncg uses both the jacobian and the hessian.
    res = minimize(F,
                   Pos,
                   jac=lambda x: np.array([G(x)]).flatten(),
                   hess=H,
                   method='trust-ncg')
    print('[time:', time() - t_start, 's]')
    return res.x.reshape(nodes, dim)
def expansion_slow(maxorder=4):
    r"""
    This is the naive (and very slow) implementation of the ("hard mode"
    part of the) expansion.

    This function is intended to be a debugging tool. Do not use this in
    production code.

    Parameters
    ----------
    maxorder : int
        Highest order of \alpha taken into account in the expansion.

    See also
    --------
    - expansion
    """
    # Raw docstring above: the original non-raw string turned "\alpha"
    # into a bell character via the \a escape.
    f = sp.IndexedBase('f')
    # Inner series: sum_k dalpha(k)/k! * (-log N)^k.
    kres = 0
    for k in range(maxorder):
        kres += dalpha(k) / sp.factorial(k) * ((-sp.log(N))**k)
    # Outer series: sum_i f[i] * kres^(i+1).
    # range() replaces the Python-2-only xrange(), which raises NameError
    # on Python 3.
    res = 0
    for i in range(maxorder):
        res += f[i] * (kres**(i + 1))
    return res
def _array_symbols(self, f_dir, inverse, index): if (f_dir, inverse) in self._trivial_index_translations: translated_index = index else: index_array_symbol = self._index_array_symbol(f_dir, inverse) translated_index = sp.IndexedBase(index_array_symbol, shape=(1,))[index] self._required_index_arrays.add((f_dir, inverse)) if (f_dir, inverse) in self._trivial_offset_translations: offsets = (0, ) * self._dim else: offset_array_symbols = self._offset_array_symbols(f_dir, inverse) offsets = tuple(sp.IndexedBase(s, shape=(1,))[index] for s in offset_array_symbols) self._required_offset_arrays.add((f_dir, inverse)) return {'index': translated_index, 'offsets': offsets}
def pde_model_disc(N, dx):
    """Build a finite-difference discretisation of the 1-D Laplace problem.

    Interior nodes use the central second-difference stencil, node 0 is a
    Dirichlet condition (c = cl) and node N a zero-flux (Neumann)
    condition.

    :param N: number of grid intervals (must be >= 1).
    :param dx: grid spacing (must be strictly positive; dx == 0 would
        divide by zero in the stencil — the original guard only rejected
        negative values).
    :raises ValueError: on non-positive dx or N < 1.
    :return: a populated ``model.Model`` instance.
    """
    if dx <= 0:
        raise ValueError("dx must be positive")
    if N < 1:
        raise ValueError("N must be positive")
    cl = sympy.Symbol('cl')
    L = sympy.Symbol('L')
    i = sympy.Idx('i')
    c = sympy.IndexedBase('c')
    m = model.Model()
    m.name = 'laplace discretised'
    m.parameters = {cl, L}
    m.solution_variables = {c[i]}
    m.bounds = sympy.And(i >= 0, i <= N)
    m.eqs = {
        # Interior: central second difference c[i+1] - 2*c[i] + c[i-1].
        # (The original used "- c[i-1]", which is not the Laplacian stencil.)
        (sympy.And(i > 0, i < N),
         sympy.Eq((c[i+1] - 2*c[i] + c[i-1])/(dx**2), 0)),
        # Left boundary: Dirichlet c = cl.
        (sympy.Eq(i, 0), sympy.Eq(c[i], cl)),
        # Right boundary: one-sided first difference = 0 (zero flux).
        (sympy.Eq(i, N), sympy.Eq((c[i] - c[i-1])/dx, 0))
    }
    return m
def indexed(name, shape, index=(iv, ix, iy, iz), list_ind=None, ranges=None, permutation=None, remove_ind=None):
    """Build an IndexedBase access (or a Matrix of accesses) named *name*.

    The axis order of both the shape and every index tuple is rearranged
    by ``set_order`` according to *permutation*, with axes in *remove_ind*
    dropped.

    :param name: base name of the symbol.
    :param shape: shape of the IndexedBase (reordered via set_order).
    :param index: default index symbols; first entry is the "component"
        axis, the rest are spatial indices.
    :param list_ind: list of per-component spatial offsets; each row il
        yields output[il, idx + offset, ...].
    :param ranges: iterable of component values k; yields one row
        output[k, spatial indices...] per value.
    :param permutation: axis permutation passed to set_order (defaults to
        the identity).
    :param remove_ind: axes to drop, passed through to set_order.
    :return: a sympy Matrix of accesses when *ranges* or *list_ind* is
        given, otherwise a single Indexed expression.
    """
    if not permutation:
        permutation = range(len(index))
    output = sp.IndexedBase(name, set_order(shape, permutation, remove_ind))
    if ranges:
        # One row per component value k, keeping the spatial indices as-is.
        # NOTE(review): `[k] + index[1:]` requires *index* to be a list;
        # the tuple default would raise TypeError here — confirm callers
        # always pass a list when using ranges/list_ind.
        ind = [
            set_order([k] + index[1:], permutation, remove_ind) for k in ranges
        ]
        return sp.Matrix([output[i] for i in ind])
    elif list_ind is not None:
        # One row per entry of list_ind, shifting each spatial index by the
        # integer offsets of that entry.
        ind = []
        indices = index[1:]
        for il, l in enumerate(list_ind):  #pylint: disable=invalid-name
            tmp_ind = []
            for ik, k in enumerate(l):  #pylint: disable=invalid-name
                tmp_ind.append(indices[ik] + int(k))
            ind.append(set_order([il] + tmp_ind, permutation, remove_ind))
        return sp.Matrix([output[i] for i in ind])
    else:
        # Plain single access with the (reordered) default indices.
        return output[set_order(index, permutation, remove_ind)]
def sympify(expr: Union[str, Number, sympy.Expr, numpy.str_], **kwargs) -> sympy.Expr:
    """Sympify *expr* using the project namespace, retrying subscripted
    names as IndexedBase on failure.

    :param expr: expression to parse; numpy strings are converted to str
        and tuples/lists to numpy arrays first.
    :param kwargs: forwarded to ``sympy.sympify``.
    :return: the parsed sympy expression.
    """
    if isinstance(expr, numpy.str_):
        # putting numpy.str_ in sympy.sympify behaves unexpected in version 1.1.1
        # It seems to ignore the locals argument
        expr = str(expr)
    if isinstance(expr, (tuple, list)):
        expr = numpy.array(expr)
    try:
        return sympy.sympify(expr, **kwargs, locals=sympify_namespace)
    except TypeError:
        # sympify raises TypeError when the expression subscripts a plain
        # Symbol. Retry with every subscripted name mapped to an
        # IndexedBase (Broadcast objects are kept as-is).
        # The original guarded this with a permanently-true
        # `if True:  # err.args[0] == ...` check, so the re-raise branch
        # was unreachable dead code and has been removed.
        indexed_base = get_subscripted_symbols(expr)
        indexed_locals = {
            k: k if isinstance(k, Broadcast) else sympy.IndexedBase(k)
            for k in indexed_base
        }
        return sympy.sympify(
            expr, **kwargs, locals={**indexed_locals, **sympify_namespace})
def generate_partial_symbolic(g, alpha=None, m=None):
    """what happens if we force coefficient evaluation, AND rational g...
    then evaluate for t2? Can we keep precision all the way through to jax?

    Builds the symbolic coefficients at 100-digit precision, lambdifies
    them against t[2] with the jax backend, and returns a jitted wrapper.

    :param g: coupling value forwarded to get_sympy_symbolic.
    :param alpha: expansion parameter; defaults to Rational(5534, 10000).
    :param m: precomputed coefficient table; loaded from pickle if None.
    """
    if m is None:
        m = sym.load_from_pickle()
    if alpha is None:
        # Rational keeps alpha exact until the final evalf below.
        alpha = s.Rational(5534, 10000)
    # NOTE(review): hard-coded order n=39 and t1=0 — presumably matched to
    # the pickled table; confirm before reusing with other data.
    n = 39
    t1 = 0
    t = s.IndexedBase('t')
    xs = get_sympy_symbolic(m, n, g, alpha, t1, t[2])
    # Force 100-digit numeric evaluation of every coefficient, sorted by key.
    expr = [v.evalf(100) for i, v in sorted(xs.items())]
    print(expr[38])
    f = s.lambdify([t[2]], expr, 'jax')
    def ret(t2):
        return np.array(f(t2))
    return j.jit(ret)
def descente_de_gradient(Xn0, x1, y1, u, v, l, t1):
    """Iteratively refine the parameter vector Xn0 (Gauss-Newton-style:
    each step applies the Jacobian pseudo-inverse to the residuals f).

    Stops when the element-wise change drops below epsilon or after 20
    iterations. Only the entries from index *l* onward are updated; the
    first *l* entries are pinned to ``t1[:l]``.
    Depends on module-level ``n``, ``Jacobien`` and ``f``.
    """
    Xn = sympy.IndexedBase('Xn')
    J = Jacobien(Xn, x1, y1, u, v, l, t1)
    epsilon = .5
    Xn1 = [0] * (2 * n + 6)  # must be (very?) different from Xn0.
    cpt = 0
    while sum(abs(abs(np.array(Xn0)) - abs(np.array(Xn1)))) > epsilon and cpt < 20:
        # print(Xn0, len(Xn0), type(Xn0), sum(abs(abs(np.array(Xn0)) - abs(np.array(Xn1)))))
        print(sum(abs(abs(np.array(Xn0)) - abs(np.array(Xn1)))))
        if cpt != 0:
            Xn0 = list(Xn1)
        # Substitute the current estimate into the symbolic Jacobian...
        J1 = J.subs([(Xn[i], Xn0[i]) for i in range(l, 2 * n + 6, 1)])
        taille = J1.shape
        # ...and evaluate it numerically element by element.
        J2 = np.zeros(taille)
        for i in range(taille[0]):
            for j in range(taille[1]):
                J2[i, j] = sympy.N(J1[i, j])
        J2 = np.matrix(J2)
        # Moore-Penrose style pseudo-inverse: (J^T J)^-1 J^T.
        J3 = (((J2.T).dot(J2)).I).dot(J2.T)
        fXn = f(Xn, x1, y1, u, v, l, t1)
        fXn = [
            fXn[j].subs([(Xn[i], Xn0[i]) for i in range(l, 2 * n + 6, 1)])
            for j in range(len(fXn))
        ]
        # Keep the first l entries fixed; update the rest with the step.
        Xn1 = t1[:l] + list(np.array(Xn0[l::] - J3.dot(fXn))[0])
        cpt += 1
    return (Xn0)
def semi_symbolic_stats():
    """Exploratory demo: derive pooled mean/variance of two samples
    semi-symbolically and pretty-print each step.

    Works with two indexed samples X (size 5) and Y (size 3) plus symbolic
    sizes n, m; shows that the pooled mean mZ and pooled variance vZ can
    be written in terms of the per-sample means/variances. Output is
    printed only; nothing is returned.
    """
    import sympy as sym
    X = sym.IndexedBase('X')
    Y = sym.IndexedBase('Y')
    # Concrete sample sizes for the indexed elements...
    n_ = 5
    m_ = 3
    # ...and symbolic sizes used in the pooled formulas.
    n, m = sym.symbols('n, m')
    Xs = [X[i] for i in range(n_)]
    Ys = [Y[j] for j in range(m_)]
    Zs = Xs + Ys
    # Symbols standing in for the per-sample means and variances.
    mX = sym.symbols('mX')
    mY = sym.symbols('mY')
    vX = sym.symbols('vX')
    vY = sym.symbols('vY')
    mean_X = sum(Xs) / n
    mean_Y = sum(Ys) / m
    mean_Z = sum(Zs) / (m + n)
    # Pooled mean: size-weighted combination of the two sample means.
    mZ = ((mX * n) + (mY * m)) / (m + n)
    sym.pprint(mZ)
    sym.pprint(mZ.subs({mX: mean_X, mY: mean_Y}))
    var_X = sum((x - mX)**2 for x in Xs) / n
    var_Y = sum((y - mY)**2 for y in Ys) / m
    var_Z = sum((z - mZ)**2 for z in Zs) / (m + n)
    # Pooled variance: within-sample part plus between-sample correction.
    vZ = (((n * vX) + (m * vY)) + ((n * (mX - mZ)**2) + (m * (mY - mZ)**2))) / (m + n)
    sym.pprint(vZ)
    sym.pprint(vZ.simplify())
    sym.pprint(vZ.factor())
    # sym.pprint(var_Z.subs({mZ: mean_Z}))
    sym.pprint(var_X)
    sym.pprint(var_Y)
    sym.pprint(var_Z)
    # NOTE(review): std_X/std_Y and the re-derived mean_Z below are computed
    # but never used or printed — scratch work left in place.
    std_X = sym.sqrt(var_X)
    std_Y = sym.sqrt(var_Y)
    mean_Z = ((mean_X * n) + (mean_Y * m)) / (n + m)
    pass
def generate_functions(m, n):
    """Lambdify the expressions of *m* whose (sorted) keys are below *n*.

    :param m: mapping of integer order -> sympy expression in g, t[1], t[2].
    :param n: cutoff order; only keys strictly less than n are kept.
    :return: a numeric function f(g, t1, t2) returning the list of values.
    """
    g = s.symbols('g')
    t = s.IndexedBase('t')
    selected = [value for order, value in sorted(m.items()) if order < n]
    return s.lambdify([g, t[1], t[2]], selected)
def smc(model, t0=0):
    """Generate Fortran SMC driver source for *model*.

    Substitutes the model's symbolic system matrices with para(...) array
    references (topologically sorting derived parameters first), emits
    them as free-form Fortran 95, and splices them into the module-level
    template string.

    :param model: DSGE-style model object providing compile_model(),
        python_sims_matrices() and parameter metadata.
    :param t0: starting period forwarded into the template.
    :return: the filled-in Fortran source as a string.
    """
    import sympy
    from sympy.printing import fcode
    cmodel = model.compile_model()
    template = fortran_model  #open('fortran_model.f90').read()
    write_prior_file(cmodel.prior, '.')
    system_matrices = model.python_sims_matrices(matrix_format='symbolic')
    npara = len(model.parameters)
    para = sympy.IndexedBase('para', shape=(npara + 1, ))
    # zip() iterates the IndexedBase via __getitem__ (para[0], para[1], ...)
    # and stops at the list's length; the leading 'garbage' symbol shifts
    # real parameters to 1-based indices para[1..npara].
    fortran_subs = dict(
        zip([sympy.symbols('garbage')] + model.parameters, para))
    # Map common numeric literals to explicit Fortran doubles.
    fortran_subs[0] = 0.0
    fortran_subs[1] = 1.0
    fortran_subs[100] = 100.0
    fortran_subs[2] = 2.0
    fortran_subs[400] = 400.0
    fortran_subs[4] = 4.0
    context = dict([(p.name, p) for p in model.parameters + model['other_para']])
    context['exp'] = sympy.exp
    context['log'] = sympy.log
    # SECURITY: eval() on the model-supplied para_func strings — only safe
    # because the model file is trusted input.
    to_replace = {}
    for p in model['other_para']:
        to_replace[p] = eval(str(model['para_func'][p.name]), context)
    to_replace = list(to_replace.items())
    print(to_replace)
    from itertools import combinations, permutations
    # Order derived-parameter substitutions so that any parameter used by
    # another is substituted after it (dependency edges -> topological sort).
    edges = [(i, j) for i, j in permutations(to_replace, 2)
             if type(i[1]) not in [float, int] and i[1].has(j[0])]
    from sympy import default_sort_key, topological_sort
    para_func = topological_sort([to_replace, edges], default_sort_key)
    to_write = [
        'GAM0', 'GAM1', 'PSI', 'PPI', 'self%QQ', 'DD2', 'self%ZZ', 'self%HH'
    ]
    fmats = [
        fcode((mat.subs(para_func)).subs(fortran_subs),
              assign_to=n,
              source_format='free',
              standard=95,
              contract=False)
        for mat, n in zip(system_matrices, to_write)
    ]
    sims_mat = '\n\n'.join(fmats)
    template = template.format(model=model,
                               yy=cmodel.yy,
                               p0='',
                               t0=t0,
                               sims_mat=sims_mat)
    return template
def __init__(self, name, num_prms, expr):
    """Parse *expr* (a string that may reference prms[0..num_prms-1]) and
    precompute its derivatives.

    :param name: instance name, also spliced into the HEADER template.
    :param num_prms: number of entries in the prms parameter array.
    :param expr: expression string parsed with sympify.
    """
    self.name = name
    # Specialise the class-level HEADER template with this instance's name.
    self.HEADER = self.HEADER.format(name)
    self.num_prms = num_prms
    prms = _sp.IndexedBase('prms', shape=num_prms)
    parsed = _sp.sympify(expr, locals={'prms': prms})
    self.expr = _sp.simplify(parsed)
    self.calculate_derivatives()
def kk_ver3(Pos, SpCons, Len, eps=0.00001):
    'Single shared derivative function, exploiting vertex anonymity.'
    start = time()
    nodes, dim = Pos.shape
    ni = nodes - 1
    X = sp.IndexedBase('X')  # the vertex being moved
    P = sp.IndexedBase('P')  # the fixed vertices
    Ki = sp.IndexedBase('Ki')  # spring constants w.r.t. the moving vertex
    Li = sp.IndexedBase('Li')  # natural lengths w.r.t. the moving vertex
    j, d = [sp.Idx(*spec) for spec in [('j', ni), ('d', dim)]]
    j_range, d_range = [(idx, idx.lower, idx.upper) for idx in [j, d]]
    # Prepare the potential function (energy of the moving vertex only).
    dist = sp.sqrt(sp.Sum((X[d] - P[j, d])**2, d_range)).doit()
    Ei = sp.Sum(Ki[j] * (dist - Li[j])**2, j_range)
    # Prepare the jacobian and hessian (shared across all vertices).
    varX = [X[d] for d in range(dim)]
    Ei_jac, Ei_hess = [
        sp.lambdify((X, P, Ki, Li), sp.simplify(f), dummify=False)
        for f in [sp.Matrix([Ei]).jacobian(varX), sp.hessian(Ei, varX)]
    ]
    print('generate function:', time() - start, 's')
    start = time()
    ## Optimisation
    delta_max = sp.oo
    # xpkl(m) splits the state into (x_m, others, k_m, l_m) for vertex m.
    xpkl = partial(_xpkl, P=Pos, K=SpCons, L=Len, n=nodes)
    while (delta_max > eps):
        # Pick the vertex with the largest gradient norm.
        norms = np.array(
            list(map(lambda m: la.norm(Ei_jac(*xpkl(m))), range(nodes))))
        max_idx, delta_max = norms.argmax(), norms.max()
        # Relax that vertex with Newton's method.
        # NOTE(review): xm is presumably a view into Pos, so the in-place
        # `xm -= delta_x` updates Pos directly — confirm _xpkl returns views.
        xpkl_m = xm, pm, km, lm = xpkl(max_idx)
        while (la.norm(Ei_jac(*xpkl_m)) > eps):
            delta_x = la.solve(Ei_hess(*xpkl_m), Ei_jac(*xpkl_m).flatten())
            xm -= delta_x
    print('Finish:', time() - start, 's')
    return Pos
def __new__(cls, base, linearized_index, field, offsets, idx_coordinate_values):
    """Create a ResolvedFieldAccess, coercing *base* into an IndexedBase.

    Plain (non-IndexedBase) bases are wrapped in a shape-(1,) IndexedBase
    before constructing the underlying sympy node; the field metadata is
    attached to the new instance afterwards.
    """
    if isinstance(base, sp.IndexedBase):
        indexed_base = base
    else:
        indexed_base = sp.IndexedBase(base, shape=(1,))
    instance = super(ResolvedFieldAccess, cls).__new__(cls, indexed_base, linearized_index)
    instance.field = field
    instance.offsets = offsets
    instance.idx_coordinate_values = idx_coordinate_values
    return instance
def manual_y_computation(weighting_seq, input_signal, T):
    """Estimate the output ŷ(T) as the convolution Σ_{k=0}^{T} ĝ[k]·u[T-k].

    Logs the symbolic sum, a semi-symbolic form (weights as symbols) and
    the numeric value; returns the numeric estimate.
    """
    logging.info('ŷ(t) = ĝ(t) * u(t)')
    g_ = sy.IndexedBase('ĝ')
    u = sy.IndexedBase('u')
    # `k` is the module-level summation symbol.
    y_ = sy.Sum(g_[k] * u[T - k], (k, 0, T))
    logging.info(f'ŷ({T}) = {y_}')

    def substitute(weights):
        # Expand the sum, then plug in the measured inputs and the
        # supplied weighting-sequence values (0-based indexing).
        expanded = y_.doit()
        expanded = expanded.subs([(u[idx], val) for idx, val in enumerate(input_signal)])
        return expanded.subs([(g_[idx], val) for idx, val in enumerate(weights)])

    g_sym = substitute(sy.symbols(str(w)) for w in weighting_seq)
    logging.info(f'ŷ({T}) = {g_sym}')
    g_val = substitute(weighting_seq)
    logging.info(f'ŷ({T}) = {g_val}')
    return g_val
def solve_constraint(self, cond, vector):
    """Solve the constraint with a vector to give the resulting set.

    :param cond: Python expression string over the indexed symbol x,
        e.g. "x[0] + x[1] > 2".
    :param vector: values substituted element-wise for x[0], x[1], ...
    :return: True if the constraint reduces to True after substitution,
        otherwise the solution set of the remaining relation.
    """
    x = sp.IndexedBase('x')
    # SECURITY: eval() executes arbitrary code from `cond` — acceptable
    # only if constraints come from trusted input; never feed this
    # user-supplied strings.
    expr = eval(cond)
    # Substitute the vector components one index at a time.
    for i, j in enumerate(vector):
        expr = expr.subs(x[i], j)
    if expr == True:
        return True
    simplified = sp.solve(expr)
    return simplified.as_set()
def disabled():
    """Disabled notebook experiment: rewrite eq. 27.1 in the degree-4
    Bernstein basis and display the simplified expansion.

    Relies on module-level s_t, s_i, eq_27_1 and IPython's display();
    presumably kept for reference only (hence the name).
    """
    s_j = sp.Symbol("j", integer=True)
    # r(j/4): the curve sampled at the four quarter points.
    r2 = sp.IndexedBase(sp.Function("r")(s_j / 4))
    # Degree-4 Bernstein-basis sum: Σ C(4,j) t^j (1-t)^(4-j) r_j.
    eq_27_1_bernstein = sp.Sum(
        sp.binomial(4, s_j) * s_t**s_j * (1 - s_t)**(4 - s_j) * r2[s_i],
        (s_j, 0, 4))
    display(eq_27_1_bernstein)
    # Replace the sampled values with eq. 27.1 evaluated at t = j/4.
    eq_27_1_bernstein = eq_27_1_bernstein.replace(
        r2[s_i],
        eq_27_1.rhs.subs(s_t, s_j / 4).simplify())
    display(eq_27_1_bernstein)
    display(eq_27_1_bernstein.doit().simplify().factor().expand(s_t))
def manual_Eu_computation(u_data):
    """Estimate the mean input energy Eu² = (1/N) Σ_{t=1}^{N} u[t]².

    Logs the expanded symbolic sum and its numeric value; returns the
    numeric estimate.
    """
    u = sy.IndexedBase('u')
    sample_count = len(u_data)
    # `t` is the module-level summation symbol.
    energy = sy.Sum(u[t]**2, (t, 1, sample_count)) / sample_count
    expanded = energy.doit()
    logging.info(f'Eu²(t) = {expanded}')
    # Substitute the measured samples (1-based indexing).
    substitutions = [(u[idx + 1], value) for idx, value in enumerate(u_data)]
    result = expanded.subs(substitutions)
    logging.info(f'Eu²(t) = {result}')
    return result
def t_k_plus_three(k):
    """Generate the (k + 3)-th term of the loop-equation recursion for the
    single-matrix case.

    :param k: recursion level.
    :return: pair ``(t[k + 3], term)`` — the indexed symbol being defined
        and the symbolic expression it equals.
    """
    alpha, g = s.symbols('alpha g')
    summation_idx = s.symbols('l', cls=s.Idx)
    t = s.IndexedBase('t')
    # Quadratic convolution part: Σ_{l=0}^{k-1} α⁴ t[l] t[k-l-1].
    quadratic_part = s.Sum(
        alpha**4 * t[summation_idx] * t[k - summation_idx - 1],
        (summation_idx, 0, k - 1))
    # Linear part: α² t[k+1].
    linear_part = alpha**2 * t[k + 1]
    term = (quadratic_part - linear_part) / g
    return t[k + 3], term
def formulate(
    cls,
    n_channels: int,
    n_poles: int,
    parametrize: bool = True,
    **kwargs: Any,
) -> sp.Matrix:
    r"""Formulate the :math:`F`-vector for *n_channels* coupled channels.

    Args:
        n_channels: Number of coupled channels.
        n_poles: Number of poles.
        parametrize: Set to `False` to get the unparametrized symbolic
            :math:`F`-vector (no pole substitutions).
        kwargs: Accepted for signature compatibility; currently unused
            in this implementation.
    """
    f_vector, k_matrix, p_vector = cls._create_matrices(n_channels)
    if not parametrize:
        return f_vector
    s = sp.Symbol("s")
    # Shared indexed symbols for the pole parameters.
    pole_position = sp.IndexedBase("m")
    pole_width = sp.IndexedBase("Gamma")
    residue_constant = sp.IndexedBase("gamma")
    pole_id = sp.Symbol("R", integer=True, positive=True)
    # First substitute the K-matrix elements, then the P-vector elements.
    return f_vector.xreplace({
        k_matrix[i, j]: NonRelativisticKMatrix.parametrization(
            i=i,
            j=j,
            s=s,
            pole_position=pole_position,
            pole_width=pole_width,
            residue_constant=residue_constant,
            n_poles=n_poles,
            pole_id=pole_id,
        )
        for i in range(n_channels)
        for j in range(n_channels)
    }).xreplace({
        p_vector[i]: cls.parametrization(
            i=i,
            s=sp.Symbol("s"),
            pole_position=pole_position,
            pole_width=pole_width,
            residue_constant=residue_constant,
            beta_constant=sp.IndexedBase("beta"),
            n_poles=n_poles,
            pole_id=pole_id,
        )
        for i in range(n_channels)
    })
def test_callback_alt_two():
    """JIT-compiled scipy.integrate-style callback must match direct
    symbolic evaluation of the expression."""
    d = sympy.IndexedBase('d')
    expr = 3 * d[0] * d[1]
    # Compile with the scipy.integrate test calling convention:
    # f(int n, double* args).
    jit_fn = g.llvm_callable([n, d], expr, callback_type='scipy.integrate.test')
    values = {d[0]: 0.2, d[1]: 1.7}
    arg_count = ctypes.c_int(2)
    arg_array = (ctypes.c_double * 2)(values[d[0]], values[d[1]])
    jit_res = jit_fn(arg_count, arg_array)
    expected = float(expr.subs(values).evalf())
    assert isclose(jit_res, expected)