def Taylor_polynomial_sympy(function_expression, variable_list, evaluation_point, degree):
    """
    Mathematical formulation reference:
    https://math.libretexts.org/Bookshelves/Calculus/Supplemental_Modules_(Calculus)/Multivariable_Calculus/3%3A_Topics_in_Partial_Derivatives/Taylor__Polynomials_of_Functions_of_Two_Variables
    :param function_expression: Sympy expression of the function
    :param variable_list: list. All variables to be approximated (to be "Taylorized")
    :param evaluation_point: list. Coordinates, where the function will be expressed
    :param degree: int. Total degree of the Taylor polynomial
    :return: Returns a Sympy expression of the Taylor series up to a given degree, of a given
        multivariate expression, approximated as a multivariate polynomial evaluated at the
        evaluation_point
    """
    from sympy import factorial, Matrix, prod
    import itertools

    n_var = len(variable_list)
    point_coordinates = [(i, j) for i, j in (zip(variable_list, evaluation_point))]  # list of tuples with variables and their evaluation_point coordinates, to later perform substitution

    deriv_orders = list(itertools.product(range(degree + 1), repeat=n_var))  # list with exponentials of the partial derivatives
    deriv_orders = [deriv_orders[i] for i in range(len(deriv_orders)) if sum(deriv_orders[i]) <= degree]  # Discarding some higher-order terms
    n_terms = len(deriv_orders)
    deriv_orders_as_input = [list(sum(list(zip(variable_list, deriv_orders[i])), ())) for i in range(n_terms)]  # Individual degree of each partial derivative, of each term

    polynomial = 0
    for i in range(n_terms):
        partial_derivatives_at_point = function_expression.diff(*deriv_orders_as_input[i]).subs(point_coordinates)  # e.g. df/(dx*dy**2)
        denominator = prod([factorial(j) for j in deriv_orders[i]])  # e.g. (1! * 2!)
        distances_powered = prod([(Matrix(variable_list) - Matrix(evaluation_point))[j] ** deriv_orders[i][j] for j in range(n_var)])  # e.g. (x-x0)*(y-y0)**2
        polynomial += partial_derivatives_at_point / denominator * distances_powered
    return polynomial
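# Usage sketch (illustrative, not from the original source): assuming the
# Taylor_polynomial_sympy function above is in scope, expand exp(x)*sin(y)
# around (0, 0) up to total degree 2. The surviving terms are y and x*y.
import sympy as sp
x, y = sp.symbols('x y')
taylor = Taylor_polynomial_sympy(sp.exp(x) * sp.sin(y), [x, y], [0, 0], 2)
print(sp.expand(taylor))  # -> x*y + y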
def test_indexed_idx_sum():
    i = symbols('i', cls=Idx)
    r = Indexed('r', i)
    assert Sum(r, (i, 0, 3)).doit() == sum([r.xreplace({i: j}) for j in range(4)])
    assert Product(r, (i, 0, 3)).doit() == prod(
        [r.xreplace({i: j}) for j in range(4)])
    j = symbols('j', integer=True)
    assert Sum(r, (i, j, j + 2)).doit() == sum(
        [r.xreplace({i: Idx(j + k)}) for k in range(3)])
    assert Product(r, (i, j, j + 2)).doit() == prod(
        [r.xreplace({i: Idx(j + k)}) for k in range(3)])
    k = Idx('k', range=(1, 3))
    A = IndexedBase('A')
    assert Sum(A[k], k).doit() == sum([A[Idx(j, (1, 3))] for j in range(1, 4)])
    assert Product(A[k], k).doit() == prod([A[Idx(j, (1, 3))] for j in range(1, 4)])
    raises(ValueError, lambda: Sum(A[k], (k, 1, 4)))
    raises(ValueError, lambda: Sum(A[k], (k, 0, 3)))
    raises(ValueError, lambda: Sum(A[k], (k, 2, oo)))
    raises(ValueError, lambda: Product(A[k], (k, 1, 4)))
    raises(ValueError, lambda: Product(A[k], (k, 0, 3)))
    raises(ValueError, lambda: Product(A[k], (k, 2, oo)))
def prod(self, axis=None):
    if axis is None:
        return sym.prod(self.flat)
    elif isinstance(axis, (tuple, list, int)):
        return sym.prod(slice_iterator(self, axis))
    else:
        raise ValueError(f'Unrecognised axis type {type(axis)}.')
def update_coefficients(coefs, p_in, p_out):
    """
    Returns a list of updated coefficients.

    The list represents the coefficients of a node obtained from one tuple of
    coefficients of an ancestral node.

    Parameters
    ----------
    coefs: tuple of floats or sympy.symbol objects
        Ancestral coefficients. A length 3 tuple (A, n, B).
    p_in: sympy.symbol object or float
        In-flow intensity (i.e. intensity of reaction from ancestor to current node)
    p_out:
        Out-flow intensity, i.e. sum of intensities of reactions of the current node
    """
    A, n, B = coefs
    updated_coefs = []
    if p_out == B:
        updated_coefs.append((A * p_in / (n + 1), n + 1, p_out))
    else:
        for k in range(n + 1):
            updated_coefs.append((A * p_in * sympy.prod(
                (i for i in range(k + 1, n + 1))) * ((-1)**(n - k)) /
                ((p_out - B)**(n - k + 1)), k, B))
        updated_coefs.append((A * p_in * sympy.prod(
            (i for i in range(1, n + 1))) * ((-1)**(n + 1)) /
            ((p_out - B)**(n + 1)), 0, p_out))
    return updated_coefs
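# Usage sketch (illustrative, not from the original source): starting from the
# trivial ancestral tuple (A, n, B) = (1, 0, 0) with symbolic in-flow a and
# out-flow b, the update splits into one term staying at B = 0 and one at b.
import sympy
a, b = sympy.symbols('a b', positive=True)
print(update_coefficients((1, 0, 0), a, b))
# -> [(a/b, 0, 0), (-a/b, 0, b)]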
def factor_linear_quadratic(f, x, as_list=False):
    """Factors a polynomial f(x) with real coefficients into linear and quadratic terms.

    Will not work if f depends on variables other than x.
    Returns a tuple with the constant coefficient and a list of the factors if as_list is True.
    """
    factors = []
    origin_roots = 0
    for root, multiplicity in sp.roots(f, x).items():
        if root == 0:
            factors.append((x, multiplicity))
            origin_roots = multiplicity
        elif root.is_real:
            factors.append((x - root, multiplicity))
        elif sp.im(root) > 0:
            factors.append((sp.expand(
                (x - root) * (x - sp.conjugate(root))), multiplicity))
    coefficient = (f / x ** origin_roots).subs(x, 0) / \
        sp.prod([factor.subs(x, 0) ** multiplicity
                 for factor, multiplicity in factors if factor != x])
    if as_list:
        return coefficient, factors
    return coefficient * sp.prod(
        [factor**multiplicity for factor, multiplicity in factors])
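# Usage sketch (illustrative, not from the original source): with sympy imported
# as sp, x**4 - 1 factors into two linear terms and one irreducible quadratic.
import sympy as sp
x = sp.symbols('x')
factored = factor_linear_quadratic(x**4 - 1, x)
print(factored)                                    # e.g. (x - 1)*(x + 1)*(x**2 + 1)
print(sp.expand(factored) == sp.expand(x**4 - 1))  # True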
def _list_commutator(self, a, b):
    from sympy import prod
    for i, A in enumerate(a):
        for j, B in enumerate(b):
            yield prod(a[:i], self.one) * prod(
                b[:j], self.one) * self._base_commutator(A, B) * prod(
                b[j + 1:], self.one) * prod(a[i + 1:], self.one)
def ReduceExpr(expr):
    if isinstance(expr, (TensorFunction, TensorProductFunction,)):
        return expr
    if isinstance(expr, Mul):
        # Look for a Tensor here, and multiply everything else by it.
        # First, try to ReduceExpr on everything that will be going
        # into this calculation in hopes of getting some tensors.
        args = list(o if o.is_Atom or isinstance(o, (TensorFunction, TensorProductFunction,))
                    else ReduceExpr(o) for o in expr.args)
        tensors = prod(t for t in args
                       if isinstance(t, (TensorFunction, TensorProductFunction,)))
        others = prod(o for o in args
                      if not isinstance(o, (TensorFunction, TensorProductFunction,)))
        if tensors == 1:
            return others
        else:
            return tensors * others
    if isinstance(expr, Add):
        return sum(ReduceExpr(arg) for arg in expr.args)
    return expr
def probability(self, event):
    # Specializations for optimization.
    if isinstance(event, FiniteProductEvent):
        assert len(self._spaces) == len(event.events)
        return sympy.prod([
            space.probability(event_slice)
            for space, event_slice in zip(self._spaces, event.events)
        ])

    if isinstance(event, CountLevelSetEvent) and self.all_spaces_equal():
        space = self._spaces[0]
        counts = event.counts
        probabilities = {
            value: space.probability(DiscreteEvent({value}))
            for value in six.iterkeys(counts)
        }

        num_events = sum(six.itervalues(counts))
        assert num_events == len(self._spaces)
        # Multinomial coefficient:
        coeff = (sympy.factorial(num_events) / sympy.prod(
            [sympy.factorial(i) for i in six.itervalues(counts)]))
        return coeff * sympy.prod([
            pow(probabilities[value], counts[value])
            for value in six.iterkeys(counts)
        ])

    raise ValueError('Unhandled event type {}'.format(type(event)))
def __or__(self, B):
    if (B.rank != self.rank):
        raise ValueError(
            "Cannot contract rank-{0} tensor with rank-{1} tensor.".format(
                self.rank, B.rank))
    if (isinstance(B, TensorProductFunction)):
        if (self.symmetric):
            from itertools import permutations
            # It suffices to just iterate over rearrangements of `self`.
            coefficient = simplify(self.coefficient * B.coefficient *
                                   frac(1, factorial(self.rank)))
            if (coefficient == 0):
                return sympify(0)
            return simplify(coefficient * sum([
                prod([v | w for v, w in zip(self.ordered_as(index_set), B)])
                for index_set in permutations(range(self.rank))
            ]))
        return (self.coefficient * B.coefficient) * prod(
            [v | w for v, w in zip(self, B)])
    else:
        try:
            return sum([(self | t_p) for t_p in B])
        except AttributeError:
            raise ValueError(
                "Don't know how to contract TensorProductFunction with '{0}'"
                .format(type(B)))
def is_reversible(self) -> bool:
    """Checks symbolically whether or not the Markov chain is reversible for any
    set of non-zero transition rate values.

    We assume that all transition rates are always non-zero and follow
    Colquhoun et al. (2004) https://doi.org/10.1529/biophysj.103.

    :return: A bool which is true if Markov chain is reversible (assuming
        non-zero transition rates).
    """
    # Digraph must be strongly connected in order for the chain to be
    # reversible. In other words it must be possible to transition from any
    # state to any other state in some finite number of transitions
    if not nx.algorithms.components.is_strongly_connected(self.graph):
        return False

    undirected_graph = self.graph.to_undirected(reciprocal=False, as_view=True)
    cycle_basis = nx.cycle_basis(undirected_graph)

    for cycle in cycle_basis:
        cycle.append(cycle[0])
        logging.debug("Checking cycle {}".format(cycle))

        iter = list(zip(cycle, itertools.islice(cycle, 1, None)))
        forward_rate_list = [
            sp.sympify(self.graph.get_edge_data(frm, to)['rate'])
            for frm, to in iter
        ]
        backward_rate_list = [
            sp.sympify(self.graph.get_edge_data(frm, to)['rate'])
            for to, frm in iter
        ]

        logging.debug("Rates moving forwards around the cycle are: %s",
                      forward_rate_list)
        logging.debug("Rates moving backwards around the cycle are: %s",
                      backward_rate_list)

        if None in backward_rate_list or None in forward_rate_list:
            logging.debug("Not all rates were specified.")
            return False

        forward_rate_product = sp.prod(forward_rate_list)
        backward_rate_product = sp.prod(backward_rate_list)
        if (forward_rate_product - backward_rate_product).evalf() != 0:
            return False
    return True
def __pow__(self, e, z=None):
    """
    Perform multinomial expansion and return terms of order less than
    `self.order`.
    """
    data = {}
    exps, coeffs = zip(*(self.d.items()))
    m = len(exps)

    print("EXPONENT:", e)

    multinoms = sympy.multinomial.multinomial_coefficients_iterator(m, int(e))
    for k, mcoeff in multinoms:
        exp = sum(expi * ki for expi, ki in zip(exps, k))
        if exp < self.order:
            coeff = sympy.simplify(
                mcoeff * sympy.prod(ci**ki for ci, ki in zip(coeffs, k)))
            # accumulate contributions that land on the same exponent
            data[exp] = sympy.simplify(data.get(exp, 0) + coeff)

    return PuiseuxSeries(data, self.alpha, self.order, self.var)
def gen_all_signatures(N, small_primes):
    signatures = {}
    for count in range(1, len(small_primes)):
        signature = (1,) * count
        smallest = sympy.prod(small_primes[:count])
        if smallest > N:
            break
        signatures[smallest] = signature
    assert smallest > N

    while signatures:
        smallest = min(signatures)
        sig = signatures.pop(smallest)
        yield sig, smallest

        # Increment any of the powers in signature
        for power in set(sig):
            # Replace an instance of power with power+1
            index = sig.index(power)
            new_signature = sig[:index] + (power + 1,) + sig[index + 1:]
            new_signature = tuple(sorted(new_signature, reverse=True))
            # smallest number with this signature: product of p_i ** e_i
            smallest = sympy.prod(
                [p ** e for p, e in zip(small_primes, new_signature)])
            if smallest <= N:
                signatures[smallest] = new_signature
def _polynomial_coeffs_with_roots(roots, scale_entropy):
    """Returns a polynomial with the given roots.

    The polynomial is generated by expanding product_{root in roots} (x - root),
    and then (1) scaling by the coefficients so they are all integers with lcm 1,
    and then (2) further scaling the coefficients by a random integer or rational
    with `scale_entropy` digits.

    Args:
        roots: List of values.
        scale_entropy: Float; entropy of the random coefficient scaling.

    Returns:
        List of coefficients `coeffs`, such that `coeffs[i]` is the coefficient of
        variable ** i.
    """
    variable = sympy.Symbol('x')  # doesn't matter, only use coefficients
    polynomial = sympy.Poly(sympy.prod([variable - root for root in roots]))
    coeffs_reversed = polynomial.all_coeffs()
    assert len(coeffs_reversed) == len(roots) + 1
    coeffs = list(reversed(coeffs_reversed))
    # Multiply terms to change rationals to integers, and then maybe reintroduce.
    lcm = sympy.lcm([sympy.denom(coeff) for coeff in coeffs])
    if scale_entropy > 0:
        while True:
            scale = number.integer_or_rational(scale_entropy, signed=True)
            if scale != 0:
                break
    else:
        scale = 1
    return [coeff * scale * lcm for coeff in coeffs]
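# Usage sketch (illustrative; scale_entropy=0 keeps the call deterministic and
# avoids the separate `number` helper module used for random scaling): the roots
# 1/2 and 3 give the integer coefficients of 2*x**2 - 7*x + 3.
import sympy
print(_polynomial_coeffs_with_roots([sympy.Rational(1, 2), 3], scale_entropy=0))
# -> [3, -7, 2]   (coeffs[i] is the coefficient of x**i)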
def __init__(self, dims):
    """Constructs a shape whose i-th dim is dims[i].

    Each dim can be one of the following types:
        integer: represents that the dimension is known and fixed.
        string: represents that the dimension is unknown and a sympy dummy symbol
            is used to represent it. Also note that contents of strings only
            matter for logging/printing. Even if the same string is given on
            multiple dimensions, it doesn't mean that they are the same.
        sympy expression: represents a dimension which possibly depends on
            dimensions of other shapes.

    Args:
        dims: A list of either integer, string or sympy.Symbol.
    """
    self._shape = []
    for x in dims:
        assert x is not None, str(dims)
        if isinstance(x, six.string_types):
            # NOTE: Dummy(x) creates a unique symbol. I.e., the value of x has no
            # meaning except for printing, etc.
            self._shape.append(sympy.Dummy(x, integer=True))
        else:
            # Converts x to a sympy type. E.g., int to sympy.Integer.
            self._shape.append(sympy.sympify(x))
    self._size = sympy.prod(self._shape)
def newton_cotes_closed(index, **kwargs):
    """Closed Newton-Cotes formulae.

    <https://en.wikipedia.org/wiki/Newton%E2%80%93Cotes_formulas#Closed_Newton.E2.80.93Cotes_formulae>,
    <http://mathworld.wolfram.com/Newton-CotesFormulas.html>.
    """
    points = numpy.linspace(-1.0, 1.0, index + 1)
    degree = index + 1 if index % 2 == 0 else index

    # Formula (26) from
    # <http://mathworld.wolfram.com/Newton-CotesFormulas.html>.
    # Note that Sympy carries out all operations in rationals, i.e.,
    # _exactly_. Only at the end, the rational is converted into a float.
    n = index
    weights = numpy.empty(n + 1)
    t = sympy.Symbol("t")
    for r in range(n + 1):
        # Compare with get_weights().
        f = sympy.prod([(t - i) for i in range(n + 1) if i != r])
        alpha = (
            2
            * (-1) ** (n - r)
            * sympy.integrate(f, (t, 0, n), **kwargs)
            / (math.factorial(r) * math.factorial(n - r))
            / index
        )
        weights[r] = alpha
    return C1Scheme("Newton-Cotes (closed)", degree, weights, points)
def conductances(model):
    """Extract intrinsic conductance terms from the model

    This function works by dividing out any arguments from currents that
    depend on the voltage
    """
    V = sp.Symbol("V")
    return (sp.prod(t for t in term.args if V not in t.free_symbols)
            for term in currents(model))
def multi_Hermite_polynomials(d, p, q):
    # 1-dimensional recursive & symbolic Hermite polynomials
    x = sym.symbols('x')

    # get 1-D base polynomials (Hermite)
    He = scalar_Hermite_polynomials(p)

    # get truncated index set
    alpha_trunc = truncated_multi_index(d, p, q)

    # d-dimensional Hermite polynomials according to alpha_trunc
    n_psi = np.shape(alpha_trunc)[0]
    X = sym.symbols('x0:%d' % d)
    # np.sqrt(np.math.factorial(i))
    psi = []
    # f_psi = []
    for i in range(n_psi):
        norm = np.prod(
            [np.math.factorial(alpha_trunc[i, j]) for j in range(d)])**.5
        psi.append(
            sym.prod([He[alpha_trunc[i, j]].subs(x, X[j]) for j in range(d)])
            / norm)
        # f_psi.append(lambdify([X], psi[i], 'numpy'))

    return (np.array(psi), alpha_trunc)
def _f_gen(l, lp, i2, i1, nu, lam2, lam1):
    return Piecewise(
        (0, (nu > l + lp) | (nu > lam1 + lam2)
            | ((lam1 > 2 * i2) & (lam2 > 2 * i2))),
        ((sqrt(prod([2 * i + 1 for i in locals().values()]))
          * (-1)**(lp + lam1 + lam2 + 1)
          * Wigner3j(l, 1, lp, -1, nu, 0)
          * Wigner9j(i2, l, i1, i2, lp, i1, lam2, nu, lam1)), True))
def integrate_monomial_over_unit_simplex(k, symbolic=False):
    """The integrals of monomials over the standard triangle and tetrahedron are
    given by

    \\int_T x_0^k0 * x1^k1 = (k0!*k1!) / (2+k0+k1)!,
    \\int_T x_0^k0 * x1^k1 * x2^k2 = (k0!*k1!*k2!) / (3+k0+k1+k2)!,

    see, e.g.,
    A set of symmetric quadrature rules on triangles and tetrahedra,
    Linbo Zhang, Tao Cui and Hui Liu,
    Journal of Computational Mathematics, Vol. 27, No. 1 (January 2009), pp. 89-96,
    <https://www.jstor.org/stable/43693493>.

    See, e.g., <https://math.stackexchange.com/q/207073/36678> for a formula in
    all dimensions.
    """
    if symbolic:
        return sympy.prod([sympy.gamma(kk + 1) for kk in k]) \
            / sympy.gamma(sum(k) + len(k) + 1)

    # exp-log to account for large values in numerator and denominator
    # import scipy.special
    return math.exp(
        math.fsum([scipy.special.gammaln(kk + 1) for kk in k])
        - scipy.special.gammaln(sum([kk + 1 for kk in k]) + 1))
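# Quick symbolic check (illustrative, not from the original source): on the unit
# triangle the integral of x*y should be 1!*1!/(1+1+2)! = 1/24, matching the
# closed form in the docstring above.
import sympy
print(integrate_monomial_over_unit_simplex((1, 1), symbolic=True))  # -> 1/24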
def find_factor(self, n):
    """Lenstra's algorithm.

    Parameters
    ----------
    n : Union[int, gmpy2.mpz]
        The number to be factored

    Returns
    -------
    gmpy2.mpz
        A divisor, or 1
    """
    random_curve = self.generate_random_curve(n)
    random_point = EllipticPoint(random_curve.x_point,
                                 random_curve.y_point,
                                 curve=random_curve)
    r_i = np.arange(2, self.omega_bound)
    m_i = np.floor(
        np.log(self.nu_bound + 2 * np.sqrt(self.nu_bound) + 1) / np.log(r_i))
    max_power = int(prod(r_i**m_i))
    i = 1
    while i < max_power:
        i += 1
        t = random_point**i
        if not is_point(t):
            return t
        if isinstance(t, IdentityEllipticPoint):
            return None
    return None
def integer_array_product(array):
    """This produces the product of every value within an array.

    Sympy is used because of its superior handling of precision compared to
    Numpy. The array must be integers. If it is not, then they are converted.

    Parameters
    ----------
    array : ndarray
        The array by which all elements will be multiplied together.

    Returns
    -------
    product : sympy.Integer
        The product of all elements of the array.
    """
    # Reformat the array into a Sympy integer array.
    sympy_array = sy.Array(array)
    sympy_array = sympy_array.applyfunc(lambda x: sy.Integer(x))
    # Flatten the array.
    size = len(sympy_array)
    flat_sympy_array = sympy_array.reshape(size)
    # And finally, compute the product.
    product = sy.prod(flat_sympy_array)
    return product
def get_upper_degree(self):
    list_of_products = [self.variables[i] ** self.max_degrees[i]
                        for i in range(self.n)]
    product = prod(list_of_products)
    product = Poly(product).monoms()
    return monomial_deg(*product)
def cumulant_expansion(ops, order, divide=False):
    # Get all combinations of length order
    combs_order = list(itertools.combinations(ops, order))
    # Get all remaining combinations
    combs_remain = list(itertools.combinations(ops, len(ops) - order))
    combs_remain.reverse()
    ops_new = []
    if order == 1:
        for c in combs_order:
            op_str = str(c[0])
            if "adjoint" in op_str:
                op_str = op_str.replace("adjoint(", "")[0:-1]
                op_str = "<" + op_str + ">"
                ops_new.append(conjugate(Symbol(op_str)))
            else:
                op_str = "<" + op_str + ">"
                ops_new.append(Symbol(op_str))
        return prod(ops_new)
    else:
        for i in range(len(combs_order)):
            ops_tmp = []
            op_str = str(prod(combs_order[i]))
            if len(combs_order[i]) == 1 and "adjoint" in op_str:
                op_str = op_str.replace("adjoint(", "")[:-1]
                ops_tmp.append(conjugate(Symbol("<" + op_str + ">")))
            else:
                ops_tmp.append(Symbol("<" + op_str + ">"))
            op_str2 = str(prod(combs_remain[i]))
            if "adjoint" in op_str2 and len(combs_remain[i]) == 1:
                op_str2 = op_str2.replace("adjoint(", "")[:-1]
                ops_tmp.append(conjugate(Symbol("<" + op_str2 + ">")))
                # TODO: replace "adjoint()" by "^\dagger" for longer averages
            else:
                ops_tmp.append(Symbol("<" + op_str2 + ">"))
            ops_new.append(prod(ops_tmp))
        if divide:
            n = binomial(len(ops), order)
        else:
            n = 1
        return sum(ops_new) / n
def volume(self):
    v = sp.prod([
        self.maxCorner[d] - self.minCorner[d] for d in range(self.dim)
    ])
    if simplify:
        return sp.simplify(assume_velocity(v.rewrite(sp.Piecewise)))
    else:
        return v
def scalar_unit(expr):
    # extract unit if not 0
    if simplify(expr) == 0:
        raise Exception("""
            Cannot determine the unit of 0, please provide an expression that
            does not evaluate to zero
            """)
    return simplify(prod((1.0 * expr.n()).args[1:]))
def factorize_prod(sym, order, divide_cumulants=False, hard_cutoff=False):
    factor, coeffs = sym.as_coeff_mul()
    # Get which coefficients are operators and parameters (symbols)
    ops = []
    syms = []
    for i in range(len(coeffs)):
        if is_operator(coeffs[i]):
            ops.append(coeffs[i])
        elif isinstance(coeffs[i], sympy.power.Pow):
            if is_operator(coeffs[i].base):
                tmp = [coeffs[i].base for k in range(coeffs[i].exp)]
                ops.extend(tmp)
            else:
                syms.append(coeffs[i])
        else:
            syms.append(coeffs[i])
    if len(syms) == 0:
        syms = [1]

    # Factorize if product has more constituents than order
    if len(ops) > order:
        if hard_cutoff:
            ops_new = 0
        else:
            bases_ = find_bases(ops)
            decision = decide_factor(bases_, order)
            if not decision:
                ops_new = cumulant_expansion(ops, order, divide_cumulants)
    # Return average if product is smaller order
    else:
        if len(ops) > 0:
            op_str = str(prod(ops))
            if len(ops) == 1 and isinstance(ops[0], adjoint):
                op_str = op_str.replace("adjoint(", "")[:-1]
                ops_new = conjugate(Symbol("<" + op_str + ">"))
            else:
                ops_new = Symbol("<" + op_str + ">")
        else:
            ops_new = 1
    return factor * prod(syms) * ops_new
def __sections(self):
    t = sp.symbols('t')
    GenSec = sp.prod(1 / (1 - (t * zz)) for zz in self.coordinates)
    poly = sp.series(GenSec, t, n=self.dimensions + 1).coeff(t**(self.dimensions))
    sections = []
    while poly != 0:
        sections.append(sp.LT(poly))
        poly = poly - sp.LT(poly)
    return (np.array(sections), len(sections))
def make_horizontal_tangents(var="x"):
    if isinstance(var, str):
        var = sympy.Symbol(var)
    elif isinstance(var, list):
        var = sympy.Symbol(random.choice(var))
    df = sympy.prod([var - random.choice(digits_nozero)
                     for i in range(random.randint(2, 3))])
    f = sympy.integrate(df, var)
    eqn = sympy.Eq(sympy.diff(f, var), 0)
    fx = "f \\left(%s \\right)" % str(var)
    return render(f, fx), render(', '.join(
        [str(var) + "=" + str(i) for i in sympy.solve(eqn)]))
def eval(cls, x, n):
    # NOTE: As of SymPy version 1.4, Float.is_integer == Float.is_zero
    try:
        if float(n).is_integer():
            return prod(x + i for i in range(int(n)))
        else:
            raise NotImplementedError(
                "'n' in pochhammer symbol must be an integer")
    except TypeError:
        pass
def dEn(f, n):
    """Generate a single n-body term."""
    if n < 1:
        return 0
    elif n == 1:
        return sum(f)
    else:
        acc = prod(f)
        for i in range(1, n):
            acc -= sum(dEn(g, i) for g in combinations(f, i))
        return acc
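# Usage sketch (illustrative, not from the original source): `prod` and
# `combinations` are assumed to be the module-level imports dEn relies on
# (sympy.prod and itertools.combinations).
from itertools import combinations
from sympy import prod, symbols
a, b, c = symbols('a b c')
print(dEn([a, b], 2))     # -> a*b - a - b
print(dEn([a, b, c], 1))  # -> a + b + c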
def test_extract_most_common_factor():
    x, y = sympy.symbols('x y')

    expr = 1 / (x + y) + 3 / (x + y) + 3 / (x + y)
    most_common_factor = extract_most_common_factor(expr)
    assert most_common_factor[0] == 7
    assert sympy.prod(most_common_factor) == expr

    expr = 1 / x + 3 / (x + y) + 3 / y
    most_common_factor = extract_most_common_factor(expr)
    assert most_common_factor[0] == 3
    assert sympy.prod(most_common_factor) == expr

    expr = 1 / x
    most_common_factor = extract_most_common_factor(expr)
    assert most_common_factor[0] == 1
    assert sympy.prod(most_common_factor) == expr
    assert most_common_factor[1] == expr
def lagrange_polys(xqs):
    n = len(xqs)
    Ls = []
    for i in range(n):
        L_i = sp.prod((x - xqs[j]) for j in range(n) if j != i)
        # normalize:
        L_i = L_i / L_i.subs(x, xqs[i])
        Ls.append(L_i)
    return Ls
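# Usage sketch (illustrative, not from the original source): with sympy imported
# as sp and the global symbol x that lagrange_polys expects, the Lagrange basis
# on the nodes [-1, 0, 1] comes out as below.
import sympy as sp
x = sp.symbols('x')
for L_i in lagrange_polys([-1, 0, 1]):
    print(sp.expand(L_i))
# -> x**2/2 - x/2, 1 - x**2, x**2/2 + x/2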
def test_indexed_idx_sum():
    i = symbols('i', cls=Idx)
    r = Indexed('r', i)
    assert Sum(r, (i, 0, 3)).doit() == sum([r.xreplace({i: j}) for j in range(4)])
    assert Product(r, (i, 0, 3)).doit() == prod([r.xreplace({i: j}) for j in range(4)])
    j = symbols('j', integer=True)
    assert Sum(r, (i, j, j+2)).doit() == sum([r.xreplace({i: j+k}) for k in range(3)])
    assert Product(r, (i, j, j+2)).doit() == prod([r.xreplace({i: j+k}) for k in range(3)])

    k = Idx('k', range=(1, 3))
    A = IndexedBase('A')
    assert Sum(A[k], k).doit() == sum([A[Idx(j, (1, 3))] for j in range(1, 4)])
    assert Product(A[k], k).doit() == prod([A[Idx(j, (1, 3))] for j in range(1, 4)])

    raises(ValueError, lambda: Sum(A[k], (k, 1, 4)))
    raises(ValueError, lambda: Sum(A[k], (k, 0, 3)))
    raises(ValueError, lambda: Sum(A[k], (k, 2, oo)))
    raises(ValueError, lambda: Product(A[k], (k, 1, 4)))
    raises(ValueError, lambda: Product(A[k], (k, 0, 3)))
    raises(ValueError, lambda: Product(A[k], (k, 2, oo)))
def make_rational_poly_simplify(var="x"):
    """Generates a rational expression of 4 polynomials, to be simplified.

    Example:
        ( (x**2 + 16*x + 60) / (x**2 - 36)) /
        ( (x**2 - 2*x - 63) / (x**2 - 5*x - 36) )

    var : character for the variable to be solved for. Defaults to random
        selection from the global list `alpha`. OR a list of possible
        characters. A random selection will be made from them.
    """
    if not var:
        var = random.choice(alpha)
    elif isinstance(var, list):
        var = random.choice(var)

    exclude = [var.upper(), var.lower()]
    x = sympy.Symbol(var)
    select = shuffle(list(range(-10, -1)) + list(range(1, 10)))[:6]
    e1 = sympy.prod([x - i for i in shuffle(select)[:2]]).expand()
    e2 = sympy.prod([x - i for i in shuffle(select)[:2]]).expand()
    e3 = sympy.prod([x - i for i in shuffle(select)[:2]]).expand()
    e4 = sympy.prod([x - i for i in shuffle(select)[:2]]).expand()
    L = len(set([e1, e2, e3, e4]))
    e = (e1 / e2) / (e3 / e4)
    s1 = ''.join(["\\frac{", sympy.latex(e1), "}", "{", sympy.latex(e2), "}"])
    s2 = ''.join(["\\frac{", sympy.latex(e3), "}", "{", sympy.latex(e4), "}"])
    s3 = ''.join(["$$\\frac{", s1, "}", "{", s2, "}$$"])
    pieces = str(e.factor()).split("/")
    try:
        num, denom = [parse_expr(i).expand() for i in pieces]
    except Exception:
        return make_rational_poly_simplify(var)
    if len(pieces) != 2 or L < 4 or degree(num) > 2 or degree(denom) > 2:
        return make_rational_poly_simplify(var)
    return s3, render(num / denom)
def test_boson_states():
    a = BosonOp("a")

    # Fock states
    n = 3
    assert (BosonFockBra(0) * BosonFockKet(1)).doit() == 0
    assert (BosonFockBra(1) * BosonFockKet(1)).doit() == 1
    assert qapply(BosonFockBra(n) * Dagger(a)**n * BosonFockKet(0)) \
        == sqrt(prod(range(1, n+1)))

    # Coherent states
    alpha1, alpha2 = 1.2, 4.3
    assert (BosonCoherentBra(alpha1) * BosonCoherentKet(alpha1)).doit() == 1
    assert (BosonCoherentBra(alpha2) * BosonCoherentKet(alpha2)).doit() == 1
    assert abs((BosonCoherentBra(alpha1) * BosonCoherentKet(alpha2)).doit() -
               exp(-S(1) / 2 * (alpha1 - alpha2) ** 2)) < 1e-12
    assert qapply(a * BosonCoherentKet(alpha1)) == \
        alpha1 * BosonCoherentKet(alpha1)
def symbolic_coeffs_and_diffs(expr, u):
    '''
    returns the coefficients for each term containing u or a derivative
    of u. Also returns the variables that derivatives of u are with
    respect to
    '''
    # convert expr to a list of terms
    expr = expr.expand()
    expr = expr.as_ordered_terms()
    # throw out terms not containing u
    expr = [i for i in expr if i.has(u)]
    coeffs = []
    diffs = []
    for e in expr:
        # if the expression is a product then expand it into multipliers
        if e.is_Mul:
            e = sp.flatten(e.as_coeff_mul())
        else:
            e = [sp.Integer(1), e]

        # find multipliers without the queried term
        without_u = [i for i in e if not i.has(u)]
        coeffs += [without_u]

        # find multipliers with the queried term
        with_u = [i for i in e if i.has(u)]
        if not (len(with_u) == 1):
            raise FormulationError(
                'the term %s has multiple occurrences of %s' % (sp.prod(e), u))

        base, diff = derivative_order(with_u[0])
        if not (base == u):
            raise FormulationError(
                'cannot express %s as a differential operation of %s' % (base, u))

        diffs += diff,
    return coeffs, diffs
def volume_cell(self):
    """Volume of a single cell e.g h_x*h_y*h_z in 3D."""
    return prod(d.spacing for d in self.dimensions).subs(self.spacing_map)
def equiv_pieri(g,P,Q,r,family):
    if not goes_to(g,P,Q):
        return 0

    if g.type == 'A':
        if not goes_to(g,P,Q):
            return 0
        zero_columns = equations(g,P,Q)[0]
        #T = sympy.symbols('t0:'+str(g.n+1))
        #Y = sympy.symbols('y0:'+str(g.n+1))
        #T2Y = dict([(T[i],sum([Y[j] for j in range(1,i+1)])) for i in range(1,g.n+1)])
        #Y2T = dict([(Y[1],T[1])] + [(Y[i],T[i]-T[i-1]) for i in range(2,g.n+1)])
        #a = g.n+2-g.m-r
        #F = (a+1)*[range(g.n+1)]
        #F[a] = [sympy.prod([(-g.T[i]+g.T[c]) for c in zero_columns]) for i in range(g.n+1)]
        #for j in reversed(range(2,a)):
        #    for i in range(j):
        #        F[j][i] = sympy.factor((F[j+1][i] - F[j+1][j]) / (g.T[j] - g.T[i]))
        #pieri = sympy.sympify(F[2][1])
        #pieri = sympy.factor(sympy.expand(pieri.subs(T2Y)))
        #pieri = pieri.subs(Y2T)
        proj_rich = [sympy.prod([(-g.T[i]+g.T[c]) for c in zero_columns]) for i in range(0,g.n+1)]
        subspace_equations = range(g.n+2-g.m-r,g.n+1)
        lin_subspace = [sympy.prod([(-g.T[i]+g.T[c]) for c in subspace_equations]) for i in range(0,g.n+1)]
        class_to_integrate = [proj_rich[i]*lin_subspace[i] for i in range(0,g.n+1)]
        pieri = sum([class_to_integrate[i] / sympy.prod([g.T[c]-g.T[i] for c in range(1,g.n+1) if c != i]) for i in range(1,g.n+1)])
        #pieri = pieri.subs(T2Y)
        return sympy.expand(sympy.factor(pieri))

    if g.type == 'C':
        #if not goes_to(g,P,Q):
        #    return 0
        linear_equations = equations(g,P,Q)[0]
        q = num_equations(g,P,Q)[0]
        #Y = sympy.symbols('y0:'+str(g.n+1))
        #T2Y = dict([(g.T[i],-sum([Y[j] for j in range(i,g.n)]) -.5*Y[g.n]) for i in range(1,g.n)] + [(g.T[g.n], -.5*Y[g.n])])
        a = g.N+2-g.m-r
        F = (a+1)*[range(g.N+1)]
        F[a] = [((-2*g.T[i])**q)*sympy.prod([(-g.T[i]+g.T[c]) for c in linear_equations]) for i in range(g.N+1)]
        for j in reversed(range(2,a)):
            for i in range(j):
                F[j][i] = sympy.factor((F[j+1][i] - F[j+1][j]) / (g.T[j] - g.T[i]))
        pieri = sympy.sympify(F[2][1])
        #pieri = pieri.subs(T2Y)
        return sympy.expand(pieri)

    if g.type == 'B':
        if not goes_to(g,P,Q):
            return 0
        if not g.leq(Q, g.special_schubert(r,family)):
            return 0
        if not g.codim(Q) <= g.codim(P) + r:
            return 0
        Y = sympy.symbols('y0:'+str(g.n+1))
        T2Y = dict([(g.T[i],-sum([Y[j] for j in range(i,g.n+1)])) for i in range(1,g.n+1)])

        #figure out number of subspace equations
        quad = num_equations(g,P,Q)[0]
        linear_equations = equations(g,P,Q)[0]
        num_subspace_eqns = g.m + r - 1
        special_factor = 1
        if r > g.k:
            #num_subspace_eqns += 1
            if quad > 0:
                quad -= 1
                num_subspace_eqns += 1
            else:
                special_factor = 2
                #h = schubert.Grassmannian('C',g.m,g.n)
                #P_C = h.perm2index(g.index2perm(P))
                #Q_C = h.perm2index(g.index2perm(Q))
                #pieri = equiv_pieri(h,P_C,Q_C,r,0) / 2
                #pieri = pieri.subs(T2Y)
                #return sympy.expand(pieri)

        #compute rho_* using divided difference operators (i.e. calculate coefficient of Schubert point class)
        #a = g.N+1-num_subspace_eqns
        #F = (a+1)*[range(g.N+1)]
        #F[a] = [((-2*g.T[i])**quad)*sympy.prod([(-g.T[i]+g.T[c]) for c in linear_equations]) for i in range(g.N+1)]
        #for j in reversed(range(2,a)):
        #    for i in range(j):
        #        F[j][i] = sympy.factor((F[j+1][i] - F[j+1][j]) / (g.T[j] - g.T[i]))
        #pieri = F[2][1]

        #compute rho_* using integration formula
        proj_rich = [((-2*g.T[i])**quad)*sympy.prod([(-g.T[i]+g.T[c]) for c in linear_equations]) for i in range(0,g.N+1)]
        subspace_equations = range(g.N+1-num_subspace_eqns,g.N+1)
        if special_factor == 2:
            subspace_equations.append(g.N-num_subspace_eqns)
            subspace_equations.sort()
            subspace_equations.remove(g.n+1)
        lin_subspace = [sympy.prod([(-g.T[i]+g.T[c]) for c in subspace_equations]) for i in range(0,g.N+1)]
        class_to_integrate = [proj_rich[i]*lin_subspace[i] for i in range(0,g.N+1)]
        pieri = sum([class_to_integrate[i] / sympy.prod([g.T[c]-g.T[i] for c in range(1,g.N+1) if c != i]) for i in range(1,g.N+1)])
        #pieri = pieri.subs(T2Y)
        return sympy.expand(sympy.factor(pieri/special_factor))

    if g.type == 'D':
        if not goes_to(g,P,Q):
            return 0
        #T = sympy.symbols('t0:'+str(g.N))
        #T_with_zero = [T[i] for i in range(g.n+1)] + [0] + [T[i] for i in range(g.n+1, g.N)]
        #T_SO = [T[i] for i in range(g.n+1)] + [0] + [-T[g.N-i] for i in range(g.n+1,g.N)]

        #figure out number of subspace equations
        delta = 1
        quad = num_equations(g,P,Q)[0]
        linear_equations = equations(g,P,Q)[0]
        subspace_eqns = g.m + r - 1
        if r > g.k:
            subspace_eqns += 1
            if quad > 0:
                quad -= 1
        if r == g.k:
            if quad == 0:
                delta = int(family + h(g,P,Q)) % 2
            if quad > 0:
                subspace_eqns += 1
                quad -= 1

        #compute rho_* using divided difference operators (i.e. calculate coefficient of Schubert point class)
        a = g.N+1-subspace_eqns
        F = (a+1)*[range(g.N+1)]
        F[a] = [((-2*g.T[i])**quad)*sympy.prod([(-g.T[i]+g.T[c]) for c in linear_equations]) for i in range(g.N+1)]
        for j in reversed(range(2,a)):
            for i in range(j):
                F[j][i] = sympy.factor((F[j+1][i] - F[j+1][j]) / (g.T[j] - g.T[i]))
        pieri = sympy.sympify(delta*F[2][1])
        return sympy.expand(pieri)
def suminfo(ds, base=2):
    cs = sumcounts(ds)
    N = sympy.prod(ds)
    ps = [sympy.sympify(c) / N for c in cs.values()]
    return info(ps, base)
def TensorPart(ell, m, j):
    return (sympy.prod((mVec,) * m)
            * SymmetricTensorProduct(*((zHat,) * (ell - 2*j - m)))
            * sympy.prod([sum([SymmetricTensorProduct(vHat, vHat)
                               for vHat in OrthogonalRightHandedBasis])
                          for i in range(j)]))