def test_array_as_explicit_call():
    assert ZeroArray(3, 2, 4).as_explicit() == ImmutableDenseNDimArray.zeros(3, 2, 4)
    assert OneArray(3, 2, 4).as_explicit() == ImmutableDenseNDimArray(
        [1 for i in range(3 * 2 * 4)]).reshape(3, 2, 4)

    k = Symbol("k")
    X = ArraySymbol("X", k, 3, 2)
    raises(ValueError, lambda: X.as_explicit())
    raises(ValueError, lambda: ZeroArray(k, 2, 3).as_explicit())
    raises(ValueError, lambda: OneArray(2, k, 2).as_explicit())

    A = ArraySymbol("A", 3, 3)
    B = ArraySymbol("B", 3, 3)
    texpr = tensorproduct(A, B)
    assert isinstance(texpr, ArrayTensorProduct)
    assert texpr.as_explicit() == tensorproduct(A.as_explicit(), B.as_explicit())

    texpr = tensorcontraction(A, (0, 1))
    assert isinstance(texpr, ArrayContraction)
    assert texpr.as_explicit() == A[0, 0] + A[1, 1] + A[2, 2]

    texpr = tensordiagonal(A, (0, 1))
    assert isinstance(texpr, ArrayDiagonal)
    assert texpr.as_explicit() == ImmutableDenseNDimArray(
        [A[0, 0], A[1, 1], A[2, 2]])

    texpr = permutedims(A, [1, 0])
    assert isinstance(texpr, PermuteDims)
    assert texpr.as_explicit() == permutedims(A.as_explicit(), [1, 0])
def test_contraction_permutation_mix():
    Me = M.subs(k, 3).as_explicit()
    Ne = N.subs(k, 3).as_explicit()

    cg1 = CodegenArrayContraction(
        CodegenArrayPermuteDims(
            CodegenArrayTensorProduct(M, N), Permutation([0, 2, 1, 3])),
        (2, 3))
    cg2 = CodegenArrayContraction(CodegenArrayTensorProduct(M, N), (1, 3))
    assert cg1 == cg2
    assert recognize_matrix_expression(cg2) == M * N.T

    cge1 = tensorcontraction(
        permutedims(tensorproduct(Me, Ne), Permutation([0, 2, 1, 3])), (2, 3))
    cge2 = tensorcontraction(tensorproduct(Me, Ne), (1, 3))
    assert cge1 == cge2

    cg1 = CodegenArrayPermuteDims(
        CodegenArrayTensorProduct(M, N), Permutation([0, 1, 3, 2]))
    cg2 = CodegenArrayTensorProduct(
        M, CodegenArrayPermuteDims(N, Permutation([1, 0])))
    assert cg1 == cg2
    assert recognize_matrix_expression(cg1) == CodegenArrayTensorProduct(M, N.T)
    assert recognize_matrix_expression(cg2) == CodegenArrayTensorProduct(M, N.T)

    cg1 = CodegenArrayContraction(
        CodegenArrayPermuteDims(
            CodegenArrayTensorProduct(M, N, P, Q),
            Permutation([0, 2, 3, 1, 4, 5, 7, 6])),
        (1, 2), (3, 5))
    cg2 = CodegenArrayContraction(
        CodegenArrayTensorProduct(
            M, N, P, CodegenArrayPermuteDims(Q, Permutation([1, 0]))),
        (1, 5), (2, 3))
    assert cg1 == cg2
    assert recognize_matrix_expression(cg1) == CodegenArrayTensorProduct(
        M * P.T * Trace(N), Q.T)
    assert recognize_matrix_expression(cg2) == CodegenArrayTensorProduct(
        M * P.T * Trace(N), Q.T)

    cg1 = CodegenArrayContraction(
        CodegenArrayPermuteDims(
            CodegenArrayTensorProduct(M, N, P, Q),
            Permutation([1, 0, 4, 6, 2, 7, 5, 3])),
        (0, 1), (2, 6), (3, 7))
    cg2 = CodegenArrayPermuteDims(
        CodegenArrayContraction(
            CodegenArrayTensorProduct(M, P, Q, N),
            (0, 1), (2, 3), (4, 7)),
        [1, 0])
    assert cg1 == cg2

    cg1 = CodegenArrayContraction(
        CodegenArrayPermuteDims(
            CodegenArrayTensorProduct(M, N, P, Q),
            Permutation([1, 0, 4, 6, 7, 2, 5, 3])),
        (0, 1), (2, 6), (3, 7))
    cg2 = CodegenArrayPermuteDims(
        CodegenArrayContraction(
            CodegenArrayTensorProduct(
                CodegenArrayPermuteDims(M, [1, 0]), N, P, Q),
            (0, 1), (3, 6), (4, 5)),
        Permutation([1, 0]))
    assert cg1 == cg2
def compute_Gamma(g_deriv, gUP):
    """Return Christoffel symbols."""
    g_derivT = Array([(g_deriv[:, :, i]).transpose() for i in range(4)])
    gUgd = tensorproduct(gUP, g_deriv)
    gUgdT = tensorproduct(gUP, g_derivT)
    return 1 / 2 * (tensorcontraction(gUgd, (1, 3))
                    + tensorcontraction(gUgdT, (1, 3))
                    - tensorcontraction(gUgd, (1, 2)))
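
# A minimal sanity check of compute_Gamma (hypothetical data, not from the
# original module): for the flat Minkowski metric all metric derivatives
# vanish, so every Christoffel symbol must be zero.
from sympy import Array, diag, tensorproduct, tensorcontraction

gUP = Array(diag(-1, 1, 1, 1))        # inverse Minkowski metric
g_deriv = Array([[[0] * 4] * 4] * 4)  # every metric derivative is zero
Gamma = compute_Gamma(g_deriv, gUP)
assert all(Gamma[m, a, b] == 0
           for m in range(4) for a in range(4) for b in range(4))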
def test_arrayexpr_contraction_permutation_mix():
    Me = M.subs(k, 3).as_explicit()
    Ne = N.subs(k, 3).as_explicit()

    cg1 = ArrayContraction(
        PermuteDims(ArrayTensorProduct(M, N), Permutation([0, 2, 1, 3])),
        (2, 3))
    cg2 = ArrayContraction(ArrayTensorProduct(M, N), (1, 3))
    assert cg1 == cg2

    cge1 = tensorcontraction(
        permutedims(tensorproduct(Me, Ne), Permutation([0, 2, 1, 3])), (2, 3))
    cge2 = tensorcontraction(tensorproduct(Me, Ne), (1, 3))
    assert cge1 == cge2

    cg1 = PermuteDims(ArrayTensorProduct(M, N), Permutation([0, 1, 3, 2]))
    cg2 = ArrayTensorProduct(M, PermuteDims(N, Permutation([1, 0])))
    assert cg1 == cg2

    cg1 = ArrayContraction(
        PermuteDims(
            ArrayTensorProduct(M, N, P, Q),
            Permutation([0, 2, 3, 1, 4, 5, 7, 6])),
        (1, 2), (3, 5)
    )
    cg2 = ArrayContraction(
        ArrayTensorProduct(M, N, P, PermuteDims(Q, Permutation([1, 0]))),
        (1, 5), (2, 3)
    )
    assert cg1 == cg2

    cg1 = ArrayContraction(
        PermuteDims(
            ArrayTensorProduct(M, N, P, Q),
            Permutation([1, 0, 4, 6, 2, 7, 5, 3])),
        (0, 1), (2, 6), (3, 7)
    )
    cg2 = PermuteDims(
        ArrayContraction(
            ArrayTensorProduct(M, P, Q, N),
            (0, 1), (2, 3), (4, 7)),
        [1, 0]
    )
    assert cg1 == cg2

    cg1 = ArrayContraction(
        PermuteDims(
            ArrayTensorProduct(M, N, P, Q),
            Permutation([1, 0, 4, 6, 7, 2, 5, 3])),
        (0, 1), (2, 6), (3, 7)
    )
    cg2 = PermuteDims(
        ArrayContraction(
            ArrayTensorProduct(PermuteDims(M, [1, 0]), N, P, Q),
            (0, 1), (3, 6), (4, 5)
        ),
        Permutation([1, 0])
    )
    assert cg1 == cg2
def init_riemann(self):
    """Riemann tensor of the metric, which is a 4-index tensor."""
    riemann = sp.MutableDenseNDimArray(np.zeros((self.dim,) * 4))  # Initializing 4-index tensor
    dchr = sp.MutableDenseNDimArray(np.zeros((self.dim,) * 4))     # Derivative of Christoffel symbols

    if self.chr is None:
        self.init_chr()  # Initialize Christoffel symbols (if not already done)

    for mu in range(self.dim):
        dchr[:, :, :, mu] = sp.diff(self.chr, self.variables[mu])

    for sigma in range(self.dim):
        for rho in range(self.dim):
            riemann[rho, sigma, :, :] = (
                dchr[rho, :, sigma, :].transpose() - dchr[rho, :, sigma, :]
                + sp.tensorcontraction(
                    sp.tensorproduct(self.chr[rho, :, :],
                                     self.chr[:, :, sigma]), (1, 2))
                - sp.tensorcontraction(
                    sp.tensorproduct(self.chr[rho, :, :],
                                     self.chr[:, :, sigma]), (1, 2)).transpose())

    self.riemann = sp.simplify(riemann)
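
# For reference (annotation added here; assuming self.chr[rho, a, b] stores
# Gamma^rho_{ab}): the loop above assembles the standard coordinate formula
#
#   R^rho_{sigma mu nu} = d_mu Gamma^rho_{nu sigma} - d_nu Gamma^rho_{mu sigma}
#                       + Gamma^rho_{mu lam} Gamma^lam_{nu sigma}
#                       - Gamma^rho_{nu lam} Gamma^lam_{mu sigma}
#
# with the result stored as riemann[rho, sigma, mu, nu].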
def geodesic_ncurve(surface: ParametricSurface, ic_uv, ic_uv_t, t1=5, dt=0.05):
    from sympy import lambdify, symbols, Function, Array, tensorproduct, tensorcontraction
    from scipy.integrate import ode as sciode
    import numpy as np

    t = symbols('t', real=True)
    u = Function(surface.sym(0), real=True)(t)
    v = Function(surface.sym(1), real=True)(t)

    # Gamma^a_{bc} x'^b x'^c, contracted over both lower indices
    second_term_tensor = tensorproduct(
        surface.christoffel_symbol.tensor().subs(
            {surface.sym(0): u, surface.sym(1): v}),
        Array([u, v]).diff(t),
        Array([u, v]).diff(t))
    second_term_tensor = tensorcontraction(second_term_tensor, (1, 3), (2, 4))

    u_t = Function(str(u) + r'^{\prime}', real=True)(t)
    v_t = Function(str(v) + r'^{\prime}', real=True)(t)
    lambdify_sympy = lambdify((u, u_t, v, v_t), [
        u_t,
        -second_term_tensor[0].subs({u.diff(t): u_t, v.diff(t): v_t}),
        v_t,
        -second_term_tensor[1].subs({u.diff(t): u_t, v.diff(t): v_t})])

    x0, t0 = [ic_uv[0], ic_uv_t[0], ic_uv[1], ic_uv_t[1]], 0.0
    scioder = sciode(lambda t, X: lambdify_sympy(*X)).set_integrator(
        'vode', method='bdf')
    scioder.set_initial_value(x0, t0)

    num_of_t = int(t1 / dt)
    u_arr = np.empty((num_of_t, 4))
    u_arr[0] = x0
    t_arr = np.arange(num_of_t) * dt
    i = 0
    while scioder.successful() and i < num_of_t - 1:
        i += 1
        u_arr[i] = scioder.integrate(scioder.t + dt)
    return t_arr, (u_arr[:, 0], u_arr[:, 2])
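
# The system integrated above is the geodesic equation in first-order form,
# with x = (u, v) the surface coordinates:
#
#   d2x^a/dt2 + Gamma^a_{bc} (dx^b/dt)(dx^c/dt) = 0
#
# second_term_tensor holds the contraction Gamma^a_{bc} x'^b x'^c, which is
# why its components enter the lambdified RHS with a minus sign.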
def tensordot(a, b):
    """Contract the last axis of ``a`` with the first axis of ``b``."""
    s1 = a.shape
    s2 = b.shape
    assert s1[-1] == s2[0]
    s3 = s1[:-1] + s2[1:]  # expected result shape (kept for clarity)
    k = len(s1) - 1
    return tensorcontraction(tensorproduct(a, b), (k, k + 1))
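
# A quick check of tensordot (hypothetical data): for rank-2 arrays the
# single-axis contraction reproduces ordinary matrix multiplication.
from sympy import Array, Matrix, tensorproduct, tensorcontraction

a = Array([[1, 2], [3, 4]])
b = Array([[5, 6], [7, 8]])
assert tensordot(a, b).tomatrix() == Matrix([[19, 22], [43, 50]])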
def compute_Upsilon(g_deriv, uUP):
    """Return Upsilon_{alpha beta} = g_{beta gamma, alpha} uUP^gamma
    (the overall minus sign is applied in the return statement)."""
    gdU = tensorproduct(g_deriv, uUP)
    return -Matrix(tensorcontraction(gdU, (1, 3)))
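
# In index notation the returned quantity, including the sign applied in the
# return statement, is (assuming g_deriv[a, b, c] holds g_{bc,a}):
#
#   Upsilon_{alpha beta} = -g_{beta gamma, alpha} u^gamma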
def LineElement():
    print("Starting Test: Line Element...")
    try:
        t, x, y, z = sympy.symbols("t x y z")
        dt, dx, dy, dz = sympy.symbols('dt dx dy dz')
        eta = bt.GRMetric(
            [t, x, y, z],
            sympy.Matrix([[-1, 0, 0, 0],
                          [0, 1, 0, 0],
                          [0, 0, 1, 0],
                          [0, 0, 0, 1]]))
        # Use a separate name for the coordinate-differential tensor so the
        # symbol `dx` is not shadowed in the assertion below.
        dx_vec = bt.GRTensor([eta], sympy.Array([dt, dx, dy, dz]))
        rhs_ = sympy.tensorcontraction(
            sympy.tensorproduct(eta.lowered, dx_vec.vals), (1, 2))
        rhs = sympy.tensorcontraction(
            sympy.tensorproduct(rhs_, dx_vec.vals), (0, 1))
        assert rhs + dt**2 - dx**2 - dy**2 - dz**2 == 0
        print("Test: Line Element - Passed")
        return 1
    except Exception:
        print(rhs)
        print(type(rhs))
        print("Test: Line Element - Failed")
        return 0
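
# The identity the test verifies, written out for the Minkowski metric eta:
#
#   ds^2 = eta_{mu nu} dx^mu dx^nu = -dt^2 + dx^2 + dy^2 + dz^2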
def V(n):
    """Elimination theory vector of variables."""
    if n == 3:
        return []
    zs = punctures(n)[1:n - 3]
    elimination_vector = flatten(sympy.Matrix([1, zs[0]]))
    for i in range(1, len(zs)):
        elimination_vector = flatten(
            sympy.tensorproduct([zs[i]**j for j in range(i + 2)],
                                elimination_vector))
    return elimination_vector
def test_array_as_explicit_matrix_symbol():
    A = MatrixSymbol("A", 3, 3)
    B = MatrixSymbol("B", 3, 3)

    texpr = tensorproduct(A, B)
    assert isinstance(texpr, ArrayTensorProduct)
    assert texpr.as_explicit() == tensorproduct(A.as_explicit(), B.as_explicit())

    texpr = tensorcontraction(A, (0, 1))
    assert isinstance(texpr, ArrayContraction)
    assert texpr.as_explicit() == A[0, 0] + A[1, 1] + A[2, 2]

    texpr = tensordiagonal(A, (0, 1))
    assert isinstance(texpr, ArrayDiagonal)
    assert texpr.as_explicit() == ImmutableDenseNDimArray(
        [A[0, 0], A[1, 1], A[2, 2]])

    texpr = permutedims(A, [1, 0])
    assert isinstance(texpr, PermuteDims)
    assert texpr.as_explicit() == permutedims(A.as_explicit(), [1, 0])
def lorentz_transform(self, transformation_matrix):
    """
    Performs a Lorentz transform on the tensor.

    Parameters
    ----------
    transformation_matrix : ~sympy.tensor.array.dense_ndim_array.ImmutableDenseNDimArray or list
        Sympy Array or multi-dimensional list containing Sympy Expressions

    Returns
    -------
    ~einsteinpy.symbolic.tensor.BaseRelativityTensor
        lorentz transformed tensor (or vector)
    """
    tm = sympy.Array(transformation_matrix)
    t = self.tensor()
    for i in range(self.order):
        if self.config[i] == "u":
            t = simplify(tensorcontraction(tensorproduct(tm, t), (1, 2 + i)))
        else:
            t = simplify(tensorcontraction(tensorproduct(tm, t), (0, 2 + i)))
        # The contraction leaves the transformed index in front;
        # move it back to position i.
        tmp = np.array(t.tolist()).reshape(t.shape)
        source, dest = list(range(len(t.shape))), list(range(len(t.shape)))
        dest.pop(i)
        dest.insert(0, i)
        tmp = np.moveaxis(tmp, source, dest)
        t = sympy.Array(tmp)

    return BaseRelativityTensor(
        t,
        syms=self.syms,
        config=self.config,
        parent_metric=None,
        variables=self.variables,
        functions=self.functions,
    )
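
# A standalone sketch (hypothetical 2x2 data, independent of einsteinpy) of
# the axis bookkeeping above: the contraction leaves the transformed index in
# slot 0, and numpy.moveaxis puts it back in slot i.
import numpy as np
import sympy
from sympy import tensorproduct, tensorcontraction

tm = sympy.Array([[0, 1], [1, 0]])  # transformation: swap the two components
t = sympy.Array([[1, 2], [3, 4]])
i = 1                               # transform the second index of t

t2 = tensorcontraction(tensorproduct(tm, t), (1, 2 + i))
tmp = np.array(t2.tolist()).reshape(t2.shape)
source, dest = list(range(len(t2.shape))), list(range(len(t2.shape)))
dest.pop(i)
dest.insert(0, i)
t2 = sympy.Array(np.moveaxis(tmp, source, dest))

assert t2 == sympy.Array([[2, 1], [4, 3]])  # second index swapped, first intact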
def tensor3_vector_product(T, v):
    """Implement a product of a rank-3 tensor (3D array) with a vector using
    tensor product and tensor contraction.

    Parameters
    ----------
    T: sp.Array of dimensions n x m x k

    v: sp.Array of dimensions k x 1

    Returns
    -------
    A: sp.Array of dimensions n x m

    Example
    -------
    >>> T = sp.Array([[[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]],
    ...               [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]])
    >>> v = sp.Array([1, 2, 3, 4]).reshape(4, 1)
    >>> tensor3_vector_product(T, v)
    [[70, 80, 90], [190, 200, 210]]
    """
    import sympy as sp
    assert T.rank() == 3
    # Reshape v to a 1D vector so that the contraction does not carry a
    # spurious "x 1" dimension.
    v = v.reshape(v.shape[0])
    p = sp.tensorproduct(T, v)
    return sp.tensorcontraction(p, (2, 3))
def tensor_product(tensor1, tensor2, i=None, j=None):
    """Tensor Product of ``tensor1`` and ``tensor2``

    Parameters
    ----------
    tensor1 : ~einsteinpy.symbolic.BaseRelativityTensor
    tensor2 : ~einsteinpy.symbolic.BaseRelativityTensor
    i : int, optional
        contract ``i``th index of ``tensor1``
    j : int, optional
        contract ``j``th index of ``tensor2``

    Returns
    -------
    ~einsteinpy.symbolic.BaseRelativityTensor
        tensor of appropriate rank

    Raises
    ------
    ValueError
        Raised when ``i`` and ``j`` both indicate 'u' or 'l' indices
    """
    product = tensorproduct(tensor1.arr, tensor2.arr)

    if i is None or j is None:
        # A contraction needs both i and j; otherwise take the plain product.
        newconfig = tensor1.config + tensor2.config
    else:
        if tensor1.config[i] == tensor2.config[j]:
            raise ValueError(
                "Index summation not allowed between %s and %s indices"
                % (tensor1.config[i], tensor2.config[j])
            )
        product = simplify(
            tensorcontraction(product, (i, len(tensor1.config) + j)))
        con = tensor1.config[:i] + tensor1.config[i + 1:]
        fig = tensor2.config[:j] + tensor2.config[j + 1:]
        newconfig = con + fig

    return BaseRelativityTensor(
        product,
        syms=tensor1.syms,
        config=newconfig,
        parent_metric=tensor1.parent_metric,
        variables=tensor1.variables,
        functions=tensor1.functions,
    )
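
# A pure-SymPy sketch (hypothetical data) of the index bookkeeping above:
# contracting index i of the first factor with index j of the second means
# contracting axes (i, rank1 + j) of the raw tensor product.
from sympy import Array, eye, tensorproduct, tensorcontraction

A = Array(eye(2))            # identity, indices (a, b)
B = Array([[1, 2], [3, 4]])  # indices (c, d)
i, j, rank1 = 1, 0, 2        # contract index b with index c
AB = tensorcontraction(tensorproduct(A, B), (i, rank1 + j))
assert AB == B               # A_{ab} B_{bd} = B_{ad} since A is the identity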
def chain_config_change():
    t = sympy.Array(tensor.tensor())
    difflist = _difference_list(newconfig, tensor.config)
    for i, action in enumerate(difflist):
        if action == 0:
            continue
        t = simplify(
            tensorcontraction(tensorproduct(met_dict[action], t), (1, 2 + i)))
        # reshuffle the indices: move the new index (slot 0) back to slot i
        dest = list(range(len(t.shape)))
        dest.remove(0)
        dest.insert(i, 0)
        t = sympy.permutedims(t, dest)
    return t
def test_nested_permutations():
    cg = CodegenArrayPermuteDims(CodegenArrayPermuteDims(M, (1, 0)), (1, 0))
    assert cg == M

    times = 3
    plist1 = [list(range(6)) for i in range(times)]
    plist2 = [list(range(6)) for i in range(times)]
    for i in range(times):
        random.shuffle(plist1[i])
        random.shuffle(plist2[i])
    plist1.append([2, 5, 4, 1, 0, 3])
    plist2.append([3, 5, 0, 4, 1, 2])
    plist1.append([2, 5, 4, 0, 3, 1])
    plist2.append([3, 0, 5, 1, 2, 4])
    plist1.append([5, 4, 2, 0, 3, 1])
    plist2.append([4, 5, 0, 2, 3, 1])

    Me = M.subs(k, 3).as_explicit()
    Ne = N.subs(k, 3).as_explicit()
    Pe = P.subs(k, 3).as_explicit()
    cge = tensorproduct(Me, Ne, Pe)

    for permutation_array1, permutation_array2 in zip(plist1, plist2):
        p1 = Permutation(permutation_array1)
        p2 = Permutation(permutation_array2)

        cg = CodegenArrayPermuteDims(
            CodegenArrayPermuteDims(
                CodegenArrayTensorProduct(M, N, P),
                p1),
            p2
        )
        result = CodegenArrayPermuteDims(
            CodegenArrayTensorProduct(M, N, P),
            p2 * p1
        )
        assert cg == result

        # Check that `permutedims` behaves the same way with explicit-component arrays:
        result1 = permutedims(permutedims(cge, p1), p2)
        result2 = permutedims(cge, p2 * p1)
        assert result1 == result2
def tensor_product(T1, T2, contraction=None):
    """Take the tensor product of two tensors.

    Arguments:
    T1 (Tensor) -- The first tensor to be multiplied
    T2 (Tensor) -- The second tensor to be multiplied

    Keyword Arguments:
    contraction (tuple) -- Indices to contract, maximum length of 2 (default None)

    Return Type -- Tensor
    """
    new_ind = T1.indices[:] + T2.indices[:]
    new_vals = sympy.tensorproduct(copy.deepcopy(T1.vals), copy.deepcopy(T2.vals))
    Tf = objects.Tensor(new_ind, new_vals)
    if contraction:
        Tf = tensor_contract(Tf, contraction[0], contraction[1])
    return Tf
def chain_config_change():
    t = sympy.Array(tensor.tensor())
    difflist = _difference_list(newconfig, tensor.config)
    for i, action in enumerate(difflist):
        if action == 0:
            continue
        t = simplify(
            tensorcontraction(tensorproduct(met_dict[action], t), (1, 2 + i)))
        # reshuffle the indices: move the new index (slot 0) back to slot i
        tmp = np.array(t.tolist()).reshape(t.shape)
        source, dest = list(range(len(t.shape))), list(range(len(t.shape)))
        dest.pop(i)
        dest.insert(0, i)
        tmp = np.moveaxis(tmp, source, dest)
        t = sympy.Array(tmp)
    return t
def weyl(self):
    r"""
    Returns the Weyl conformal tensor using the formula:

    C_{\rho\sigma\mu\nu} = R_{\rho\sigma\mu\nu}
        - \frac{2}{n - 2} (g_{\rho[\mu} R_{\nu]\sigma} - g_{\sigma[\mu} R_{\nu]\rho})
        + \frac{2}{(n - 1)(n - 2)} g_{\rho[\mu} g_{\nu]\sigma} R
    """
    if self._weyl is None:
        n = self.dim
        if n < 3:
            raise ValueError(
                "the Weyl tensor is only defined in dimensions of 3 or more. "
                "{} is of dimension {}".format(self, n))
        elif n == 3:
            res = tensorproduct(zeros(3, 3), zeros(3, 3))
            self._weyl = Tensor("C", res, self, symmetry=[[2, 2]],
                                covar=(1, -1, -1, -1))
            return self._weyl

        c1 = Rational(1, n - 2)
        c2 = Rational(1, (n - 2) * (n - 1))
        mu, nu, si, rh = indices("mu nu sigma rho", self)
        R = self.riemann
        RR = self.ricci_tensor
        RRR = self.ricci_scalar
        g = self.metric
        C = (R(rh, -si, -mu, -nu)
             - c1 * (g(rh, -mu) * RR(-nu, -si) - g(rh, -nu) * RR(-mu, -si)
                     + g(-si, -nu) * RR(-mu, rh) - g(-si, -mu) * RR(-nu, rh))
             + c2 * (g(rh, -mu) * g(-nu, -si) - g(rh, -nu) * g(-mu, -si)) * RRR)
        res = expand_tensor(C, [rh, -si, -mu, -nu])
        self._weyl = Tensor("C", res, self, symmetry=[[2, 2]],
                            covar=(1, -1, -1, -1))
    return self._weyl
def test_array_symbol_and_element():
    A = ArraySymbol("A", 2)
    A0 = ArrayElement(A, (0,))
    A1 = ArrayElement(A, (1,))
    assert A.as_explicit() == ImmutableDenseNDimArray([A0, A1])

    A2 = tensorproduct(A, A)
    assert A2.shape == (2, 2)
    # TODO: not yet supported:
    # assert A2.as_explicit() == Array([[A[0]*A[0], A[1]*A[0]], [A[0]*A[1], A[1]*A[1]]])

    A3 = tensorcontraction(A2, (0, 1))
    assert A3.shape == ()
    # TODO: not yet supported:
    # assert A3.as_explicit() == Array([])

    A = ArraySymbol("A", 2, 3, 4)
    Ae = A.as_explicit()
    assert Ae == ImmutableDenseNDimArray(
        [[[ArrayElement(A, (i, j, k)) for k in range(4)]
          for j in range(3)] for i in range(2)])

    p = permutedims(A, Permutation(0, 2, 1))
    assert isinstance(p, PermuteDims)
def test_Metric_weyl():
    x, y, z = symbols("x y z", real=True)
    I = Metric("I", (x, y, z), eye(3))
    zero_tensor = tensorproduct(zeros(3, 3), zeros(3, 3))
    assert I.weyl.as_array() == zero_tensor

    (coords, t, r, th, ph, schw, g, mu, nu) = _generate_schwarzschild()
    rh, si = indices("rho sigma", g)
    C = g.weyl

    def is_zero(arr):
        for comp in arr:
            yield comp.equals(0)

    # Antisymmetry in the first and in the second index pair:
    expr = C(-rh, -si, -mu, -nu) + C(-si, -rh, -mu, -nu)
    assert all(is_zero(expand_tensor(expr)))
    expr = C(-rh, -si, -mu, -nu) + C(-rh, -si, -nu, -mu)
    assert all(is_zero(expand_tensor(expr)))
    # Symmetry under exchange of the two index pairs:
    expr = C(-rh, -si, -mu, -nu) - C(-mu, -nu, -rh, -si)
    assert all(is_zero(expand_tensor(expr)))
    # First Bianchi identity (cyclic sum over the last three indices):
    expr = (C(-rh, -si, -mu, -nu) + C(-rh, -mu, -nu, -si)
            + C(-rh, -nu, -si, -mu))
    assert all(is_zero(expand_tensor(expr)))
            exec('W.append(w' + str(FromLayer) + str(i) + str(j) + ')')
    W = sp.Array(W)
    W = W.reshape(outputNeurons, inputNeurons)
    return W


#_weights_between_layers_1_and_2____
W_l1tol2 = weights(1, 4, 4)
#_weights_between_layers_2_and_3____
W_l2tol3 = weights(2, 4, 4)
#_weights_between_layers_3_and_4____
W_l3tol4 = weights(3, 4, 2)
#_weights_between_layers_4_and_5____
W_l4tol5 = weights(4, 2, 1)
#______________________________________________________________________________

#_function_definition_and_input______________
f = lambda x: 1 / (1 + sp.exp(-x))  # logistic sigmoid
input = sp.Array([inp1, inp2, inp3, inp4])
#_____________________________________________

#_Outputs_____________________________________________________________________
o_l1 = [f(input[0]), f(input[1]), f(input[2]), f(input[3])]
o_l2 = sp.tensorcontraction(
    sp.tensorproduct([o_l1], W_l1tol2)[0, :, :, :].applyfunc(f), (0, 1))
o_l3 = sp.tensorcontraction(
    sp.tensorproduct([o_l2], W_l2tol3)[0, :, :, :].applyfunc(f), (0, 1))
o_l4 = sp.tensorcontraction(
    sp.tensorproduct([o_l3], W_l3tol4)[0, :, :, :].applyfunc(f), (0, 1))
o_l5 = sp.tensorcontraction(
    sp.tensorproduct([o_l4], W_l4tol5)[0, :, :, :].applyfunc(f), (0, 1))
#______________________________________________________________________________
def as_explicit(self):
    return tensorproduct(*[
        arg.as_explicit() if hasattr(arg, "as_explicit") else arg
        for arg in self.args])
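
# Usage note: for operands that define as_explicit() (e.g. ArraySymbol or
# MatrixSymbol), this recurses into each factor, so
# ArrayTensorProduct(A, B).as_explicit() equals
# tensorproduct(A.as_explicit(), B.as_explicit()),
# as exercised by the tests above.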
# Computes the determinant of the Stokeslet.
import sympy as sy

x1, x2, x3 = sy.symbols('r_1 r_2 r_3', real=True)
y1, y2, y3 = sy.symbols('y_1 y_2 y_3', real=True)
mu = sy.symbols('mu', real=True)
a = sy.symbols('a', real=True)

r = sy.Array([y1 - x1, y2 - x2, y3 - x3])
rv = sy.Matrix([y1 - x1, y2 - x2, y3 - x3])
R = rv.norm()
rr = sy.tensorproduct(r, r)
G = (3 / 4) * a * (rr.tomatrix() / (R**3) + sy.eye(3) / R)
print(sy.latex(sy.det(G)))
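
# Written out, the Green's function assembled above is (with this script's
# prefactor conventions):
#
#   G_ij = (3a/4) * (r_i r_j / R^3 + delta_ij / R),   r = y - x,  R = |r|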
v11 = 1
v22 = 1

r11 = y1 - x11
r12 = y2 - x12
r21 = y1 - x21
r22 = y2 - x22

r1 = sy.Array([r11, r12])
r2 = sy.Array([r21, r22])
rr1 = sy.Matrix([r11, r12])
rr2 = sy.Matrix([r21, r22])
v1 = sy.Matrix([v11, v12])
v2 = sy.Matrix([v21, v22])

r1r1 = sy.tensorproduct(r1, r1).tomatrix()
r2r2 = sy.tensorproduct(r2, r2).tomatrix()

G1 = 0.01 * (3 / 4) * (r1r1 / (rr1.norm()**3) + sy.eye(2) / (rr1.norm()))
G2 = 0.01 * (3 / 4) * (r2r2 / (rr2.norm()**3) + sy.eye(2) / (rr2.norm()))

u1 = G1 * v1
u2 = G2 * v2

G = sy.Matrix([[0, 0], [0, 0]])
G[:, 0] = u1
G[:, 1] = u2
# detG3 = sy.det(G)
# print(sy.latex(detG3))
def generate_geodesic_clcode(g_deriv, gUP, uUP, level):
    """Reduce RHS to 1 equation"""
    # TODO: This function is a little bit too complicated (cyclomatic
    # complexity is 19...). It may be useful to break it down into smaller
    # pieces, so that testing can also be improved.

    # In our template we have to insert:
    # - The CSE symbols
    # - Additional definitions/manipulations
    # - The RHS
    # To do this we initialize the variable clcode with the cse symbols,
    # and we append everything else to it.
    if level == 1:
        # Upsilon_{beta alpha} = g_{beta gamma, alpha} uUP^gamma
        Upsilon = compute_Upsilon(g_deriv, uUP)
        gUPuUPUpsilon = tensorproduct(tensorproduct(gUP, uUP), Upsilon)
        # Xi1^mu = g^{mu alpha} uUP^beta Upsilon_{beta alpha}
        # Xi2^mu = -1/2 g^{mu alpha} u^beta Upsilon_{alpha beta}
        Xi1UP = tensor_contractions(gUPuUPUpsilon, [(2, 3), (1, 4)])
        Xi2UP = tensor_contractions(-1 / 2 * gUPuUPUpsilon, [(1, 3), (2, 3)])
        # rhs^mu = -(Xi1^mu + Xi2^mu)
        rhsUP, clcode = apply_cse_and_assign_symbols(list(Xi1UP + Xi2UP))
        clcode += assign_vector('rhs', rhsUP)
    elif level == 2:
        Upsilon = compute_Upsilon(g_deriv, uUP)
        Xi = Upsilon.transpose() - Upsilon / 2
        # Lambda = gUP * Xi
        Lambda, clcode = apply_cse_and_assign_symbols(gUP * Xi)
        clcode += assign_matrix('Lambda', Lambda[0])
        clcode += "real4 rhs = matrix_vector_product(Lambda, u);\n"
    elif level == 3:
        Upsilon = compute_Upsilon(g_deriv, uUP)
        Xi = Upsilon.transpose() - Upsilon / 2
        (Xi, gUP), clcode = apply_cse_and_assign_symbols([Xi, gUP])
        clcode += assign_matrices({'Xi': Xi, 'gUP': gUP})
        clcode += "real4 rhs = matrix_vector_product(gUP, matrix_vector_product(Xi, u));\n"
    elif level == 4:
        Upsilon = compute_Upsilon(g_deriv, uUP)
        (Upsilon, gUP), clcode = apply_cse_and_assign_symbols([Upsilon, gUP])
        clcode += assign_matrices({'Upsilon': Upsilon, 'gUP': gUP})
        clcode += assign_transposed_matrix('Upsilon')
        clcode += "real16 Xi = UpsilonT - Upsilon/2;\n"
        clcode += "real4 rhs = matrix_vector_product(gUP, matrix_vector_product(Xi, u));\n"
    elif level == 5:
        Phi = compute_Phi(g_deriv)
        (*Phi, gUP), clcode = apply_cse_and_assign_symbols([*Phi, gUP])
        clcode += assign_matrices({
            'Phi_t': Phi[0],
            'Phi_x': Phi[1],
            'Phi_y': Phi[2],
            'Phi_z': Phi[3],
            'gUP': gUP
        })
        clcode += "real16 Upsilon = -uUPt * Phi_t - uUPx * Phi_x"
        clcode += " - uUPy * Phi_y - uUPz * Phi_z;\n"
        clcode += assign_transposed_matrix('Upsilon')
        clcode += "real16 Xi = UpsilonT - Upsilon/2;\n"
        clcode += "real4 rhs = matrix_vector_product(gUP, matrix_vector_product(Xi, u));\n"
    elif level == 6:
        # Typically this level is very slow because it has to parse a lot of
        # long sympy expressions.
        # CSE does not handle g_deriv very well, so we split it.
        g_deriv = restructure_derivatives_to_list(g_deriv)
        (*g_deriv, gUP), clcode = apply_cse_and_assign_symbols([*g_deriv, gUP])
        d = {0: 't', 1: 'x', 2: 'y', 3: 'z'}
        # Write derivatives
        for a in range(4):
            for b in range(4):
                for c in range(4):
                    clcode += assign_scalar("g_" + d[a] + d[b] + d[c],
                                            g_deriv[c][a, b])
        for c in range(4):
            clcode += "real16 Phi_" + d[c] + " = {" + \
                ", ".join(["g_" + d[b] + d[c] + d[a]
                           for a in range(4) for b in range(4)]) + "};\n"
        clcode += "real16 Upsilon = -uUPt * Phi_t - uUPx * Phi_x"
        clcode += " - uUPy * Phi_y - uUPz * Phi_z;\n"
        clcode += assign_transposed_matrix('Upsilon')
        clcode += assign_matrix('gUP', gUP)
        clcode += "real16 Xi = UpsilonT - Upsilon/2;\n"
        clcode += "real4 rhs = matrix_vector_product(gUP, matrix_vector_product(Xi, u));\n"
    elif level == 7:
        Gamma = compute_Gamma(g_deriv, gUP)
        GammauUPuUP = tensorproduct(tensorproduct(Gamma, uUP), uUP)
        rhsUP = -tensor_contractions(GammauUPuUP, [(1, 3), (2, 4)])
        rhsUP, clcode = apply_cse_and_assign_symbols(list(rhsUP))
        clcode += assign_vector('rhs', rhsUP)
    elif level == 8:
        Gamma = [Matrix(G) for G in compute_Gamma(g_deriv, gUP)]
        GammaUP, clcode = apply_cse_and_assign_symbols(Gamma)
        d = {0: 't', 1: 'x', 2: 'y', 3: 'z'}
        clcode += assign_matrices(
            {"GammaUP" + d[i]: GammaUP[i] for i in range(4)})
        clcode += "real4 rhs = {-dot(u, matrix_vector_product(GammaUPt, u)),"
        clcode += "-dot(u, matrix_vector_product(GammaUPx, u)),"
        clcode += "-dot(u, matrix_vector_product(GammaUPy, u)),"
        clcode += "-dot(u, matrix_vector_product(GammaUPz, u))};\n"
    elif level == 9:
        # CSE does not handle g_deriv very well, so we split it and make a
        # list with all the derivatives.
        # g_deriv[0] is the derivative with respect to t, and so on.
        g_deriv = restructure_derivatives_to_list(g_deriv)
        # We unpack to avoid nested lists
        (*g_deriv, gUP), clcode = apply_cse_and_assign_symbols([*g_deriv, gUP])
        d = {0: 't', 1: 'x', 2: 'y', 3: 'z'}
        # Write derivatives
        for a in range(4):
            for b in range(4):
                for c in range(4):
                    clcode += assign_scalar("g_" + d[a] + d[b] + d[c],
                                            g_deriv[c][a, b])
        clcode += assign_vectors(
            {'gUP' + d[i]: list(gUP.row(i)) for i in range(4)})
        for c in range(4):
            clcode += "real16 A_" + d[c] + " = {" + \
                ", ".join(["g_" + d[c] + d[a] + d[b]
                           for a in range(4) for b in range(4)]) + "};\n"
            clcode += "real16 B_" + d[c] + " = {" + \
                ", ".join(["g_" + d[c] + d[b] + d[a]
                           for a in range(4) for b in range(4)]) + "};\n"
            clcode += "real16 C_" + d[c] + " = {" + \
                ", ".join(["g_" + d[a] + d[b] + d[c]
                           for a in range(4) for b in range(4)]) + "};\n"
        for c in range(4):
            clcode += "real16 GammaUP" + d[c] + " = 0.5 * (" + \
                "gUP" + d[c] + ".s0 * (A_t + B_t - C_t) + " \
                "gUP" + d[c] + ".s1 * (A_x + B_x - C_x) + " \
                "gUP" + d[c] + ".s2 * (A_y + B_y - C_y) + " \
                "gUP" + d[c] + ".s3 * (A_z + B_z - C_z));\n"
        clcode += "real4 rhs = {-dot(u, matrix_vector_product(GammaUPt, u)),"
        clcode += "-dot(u, matrix_vector_product(GammaUPx, u)),"
        clcode += "-dot(u, matrix_vector_product(GammaUPy, u)),"
        clcode += "-dot(u, matrix_vector_product(GammaUPz, u))};\n"
    else:
        raise ValueError("Level {} not implemented".format(level))

    return clcode
y1, y2, y3 = sy.symbols('y_1 y_2 y_3', real=True)
mu, eta = sy.symbols('mu eta', real=True)
theta_1, theta_2, theta_3, a = sy.symbols('theta_1 theta_2 theta_3 a', real=True)

r1 = sy.Array([y1 - a * sy.cos(theta_1), y2 - a * sy.sin(theta_1), 0])
r1vec = sy.Matrix([y1 - a * sy.cos(theta_1), y2 - a * sy.sin(theta_1), 0])
r2 = sy.Array([y1 - a * sy.cos(theta_2), y2 - a * sy.sin(theta_2), 0])
r2vec = sy.Matrix([y1 - a * sy.cos(theta_2), y2 - a * sy.sin(theta_2), 0])
r3 = sy.Array([y1 - a * sy.cos(theta_3), y2 - a * sy.sin(theta_3), 0])
r3vec = sy.Matrix([y1 - a * sy.cos(theta_3), y2 - a * sy.sin(theta_3), 0])

R1 = r1vec.norm()
rr1 = sy.tensorproduct(r1, r1)
G1 = (3 / 4) * eta * (rr1.tomatrix() / (R1**3) + sy.eye(3) / R1)
# + (1/8)*(eta**3)*(2*sy.eye(3)/(R**3) - 6*rr.tomatrix()/(R**5))

R2 = r2vec.norm()
rr2 = sy.tensorproduct(r2, r2)
G2 = (3 / 4) * eta * (rr2.tomatrix() / (R2**3) + sy.eye(3) / R2)
# + (1/8)*(eta**3)*(2*sy.eye(3)/(R**3) - 6*rr.tomatrix()/(R**5))

R3 = r3vec.norm()
rr3 = sy.tensorproduct(r3, r3)
G3 = (3 / 4) * eta * (rr3.tomatrix() / (R3**3) + sy.eye(3) / R3)  # same Oseen form as G1, G2
# + (1/8)*(eta**3)*(2*sy.eye(3)/(R**3) - 6*rr.tomatrix()/(R**5))
from sympy import Array, tensorproduct, init_printing, symbols, pprint

x, y, z, t = symbols('x y z t')
init_printing(use_unicode=True)

A = Array([x, y, z, t])
B = Array([1, 2, 3, 4])
# Rank-2 outer product: entry (i, j) is A[i]*B[j]
pprint(tensorproduct(A, B))
W_l2tol3 = weights(2, 4, 4)
#_weights_between_layers_3_and_4____
W_l3tol4 = weights(3, 4, 2)
#_weights_between_layers_4_and_5____
W_l4tol5 = weights(4, 2, 1)
#______________________________________________________________________________

#_function_definition_and_input______________
f = lambda x: 1 / (1 + sp.exp(-x))  # logistic sigmoid
input = sp.Array([inp1, inp2, inp3, inp4])
#_____________________________________________

#_Outputs_____________________________________________________________________
o_l1 = sp.Array([[f(input[0]), f(input[1]), f(input[2]), f(input[3])]])
o_l2 = sp.Array([
    sp.tensorcontraction(
        sp.tensorproduct(o_l1, W_l1tol2)[0, :, :, :].applyfunc(f), (0, 1))
])
o_l3 = sp.Array([
    sp.tensorcontraction(
        sp.tensorproduct(o_l2, W_l2tol3)[0, :, :, :].applyfunc(f), (0, 1))
])
o_l4 = sp.Array([
    sp.tensorcontraction(
        sp.tensorproduct(o_l3, W_l3tol4)[0, :, :, :].applyfunc(f), (0, 1))
])
o_l5 = sp.Array([
    sp.tensorcontraction(
        sp.tensorproduct(o_l4, W_l4tol5)[0, :, :, :].applyfunc(f), (0, 1))
])
#______________________________________________________________________________