def test_jacobian(self): C_0, C_1 = symbols('C_0 C_1') state_vars = [C_0, C_1] t = Symbol('t') # linear compartmental matrix # constant input # The result is the compartmental matrix input_fluxes = {0: 50, 1: 60} internal_fluxes = {} output_fluxes = {0: C_0, 1: 6 * C_1} rm = SmoothReservoirModel(state_vars, t, input_fluxes, output_fluxes, internal_fluxes) self.assertEqual(rm.jacobian, -diag(1, 6)) # linear compartmental matrix # but 'linear' state dependent input I=M*C+I0 # The result is J=B+M sv = Matrix(state_vars) B = -diag(1, 6) M = Matrix([[1, 2], [3, 4]]) I0 = Matrix(2, 1, [50, 60]) I = M * sv + I0 rm = SmoothReservoirModel.from_B_u(sv, t, -diag(1, 6), I) self.assertEqual(rm.jacobian, B + M) # non linear compartmental matrix # constant input # The result is NOT the compartmental matrix B state_vars = [C_0] output_fluxes = {0: C_0**3} #input_fluxes = {0:50,1:60} input_fluxes = {} rm = SmoothReservoirModel(state_vars, t, input_fluxes, output_fluxes, internal_fluxes) J = rm.jacobian CM = Matrix([-3 * C_0**2]) # the next line breaks self.assertEqual(J, CM) # non linear compartmental matrix # with (linear) external function in input state_vars = [C_0] output_fluxes = {0: C_0**3} f_expr = Function('f')(t) def f_func(t_val): return t_val func_set = {f_expr: f_func} input_fluxes = {0: C_0 * f_expr} #input_fluxes = {0:50,1:60} internal_fluxes = {} rm = SmoothReservoirModel(state_vars, t, input_fluxes, output_fluxes, internal_fluxes) J = rm.jacobian CM = Matrix([-3 * C_0**2 + f_expr]) self.assertEqual(J, CM)
def sym(): # # Not necessary but gives nice-looking latex output # # More info at: http://docs.sympy.org/latest/tutorial/printing.html # sp.init_printing() # # sx, sy, rho = sp.symbols('sigma_x sigma_y rho') # matrix = sp.Matrix([[sx ** 2, rho * sx * sy], # [rho * sx * sy, sy ** 2]]) # print(sp.pretty(matrix.inv())) # print(sp.pretty(sp.simplify(matrix.inv()))) print('lets go') i = sp.symbols('i') A = sp.Matrix(sp.symarray('a', (6, 3))) sigmas = sp.symbols(' '.join(['s{}'.format(k) for k in range(6)])) Sll = sp.diag(*sigmas) N = A.T * Sll.inv() * A # (sp.pretty(N)) A2 = A.copy() Sll2 = sp.diag(*sigmas) # Sll2[0, 0] = Sll2[0, 0] * (1 - i) N2 = A.T * Sll2**-1 * A # print(sp.pretty(N2)) Sxx = N.inv() Sxx2 = N2.inv() print('OK')
def __init__(self, scheme, output_txt=False): # pylint: disable=unsubscriptable-object self.nvtot = scheme.s.shape[0] self.consm = list(scheme.consm.keys()) self.param = scheme.param self.dim = scheme.dim self.is_stable_l2 = True self.output_txt = output_txt if scheme.rel_vel is None: jacobian = scheme.EQ.jacobian(self.consm) else: jacobian = (scheme.Tu * scheme.EQ).jacobian(self.consm) relax_mat_m = sp.eye(self.nvtot) - sp.diag(*scheme.s) relax_mat_m[:, :len(self.consm)] += sp.diag(*scheme.s) * jacobian if scheme.rel_vel is not None: relax_mat_m = scheme.Tmu * relax_mat_m * scheme.Tu relax_mat_m = relax_mat_m.subs([ (i, j) for i, j in zip([rel_ux, rel_uy, rel_uz], scheme.rel_vel) ]) self.relax_mat_f = scheme.invM * relax_mat_m * scheme.M # alltogether(self.relax_mat_f) velocities = sp.Matrix(scheme.stencil.get_all_velocities()) self.velocities = np.asarray(velocities).astype('float')
def get_matrix_of_converted_atoms(Nu, positions, pending_conversion, natural_influence, Omicron, D): """ :param Nu: A matrix with a shape=(<number of Matters in Universe>, <number of Atoms in Universe>) where each Nu[i,j] stands for how many atoms of type j in matter of type i. :type Nu: Matrix :param ps: positions of matters :type ps: [Matrix] :return: """ x, y = symbols('x y') number_of_matters, number_of_atoms = Nu.shape M = zeros(0, number_of_atoms) if number_of_matters != len(positions): raise Exception("Parameters shapes mismatch.") for (i, position) in enumerate(positions): (a, b) = tuple(position) K = get_conversion_ratio_matrix(pending_conversion, Nu[i, :]) M = M.col_join(((diag(*(ones(1, number_of_atoms)*diag(*K)*Omicron.transpose()))*D).transpose() * natural_influence).transpose().subs({x: a, y: b})) return M.evalf()
def __init__(self, scheme): # pylint: disable=unsubscriptable-object self.nvtot = scheme.s.shape[0] self.consm = list(scheme.consm.keys()) self.param = scheme.param self.dim = scheme.dim self.is_stable_l2 = True if scheme.rel_vel is None: jacobian = scheme.EQ.jacobian(self.consm) else: jacobian = (scheme.Tu * scheme.EQ).jacobian(self.consm) relax_mat_m = sp.eye(self.nvtot) - sp.diag(*scheme.s) relax_mat_m[:, :len(self.consm)] += sp.diag(*scheme.s) * jacobian if scheme.rel_vel is not None: relax_mat_m = scheme.Tmu * relax_mat_m * scheme.Tu relax_mat_m = relax_mat_m.subs( [ (i, j) for i, j in zip( [rel_ux, rel_uy, rel_uz], scheme.rel_vel ) ] ) self.relax_mat_f = scheme.invM * relax_mat_m * scheme.M # alltogether(self.relax_mat_f) velocities = sp.Matrix(scheme.stencil.get_all_velocities()) self.velocities = np.asarray(velocities).astype('float')
def matrix_power(M, n):
    # Compute M**n via the Jordan decomposition M = P * J * P**-1.
    P, jordan_cells = M.jordan_cells()
    # jordan_cell_power (defined elsewhere in this module) raises each Jordan
    # cell to the n-th power in place.
    for j in jordan_cells:
        jordan_cell_power(j, n)
    return P * sp.diag(*jordan_cells) * P.inv()
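# A minimal, self-contained sanity check for the Jordan-power idea used in
# matrix_power above.  It does not call matrix_power (whose helpers
# `jordan_cells`/`jordan_cell_power` live elsewhere in this module and use an
# older sympy API); instead it verifies the underlying identity
# M**n == P * J**n * P**-1 with the current `jordan_form` API.  The name
# `_demo_matrix_power_identity` is illustrative only.
def _demo_matrix_power_identity():
    import sympy as sp
    M = sp.Matrix([[2, 1], [0, 2]])   # a single 2x2 Jordan block for eigenvalue 2
    n = 3
    P, J = M.jordan_form()            # M == P * J * P**-1
    assert P * J**n * P.inv() == M**n
    return P * J**n * P.inv()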
def get_refl_transform(nx, ny):
    nss = nx**2 + ny**2
    v = Matrix([[nx, ny]]).T
    H2 = diag(1, 1) - 2 * v * v.T / nss
    H3 = diag(1, 1, 1)
    H3[0:2, 0:2] = H2
    return H3
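# A minimal, self-contained check of the Householder construction used in
# get_refl_transform above: the 2x2 block should be symmetric, involutory and
# map the normal (nx, ny) to its negative.  Written with explicit sympy imports
# so it does not depend on this module's import style; `_demo_refl_transform`
# is an illustrative name, not part of the original code.
def _demo_refl_transform():
    from sympy import symbols, Matrix, eye, zeros, simplify
    nx, ny = symbols('n_x n_y', real=True)
    v = Matrix([nx, ny])
    H2 = eye(2) - 2 * v * v.T / (nx**2 + ny**2)
    assert (H2 - H2.T).applyfunc(simplify) == zeros(2, 2)         # symmetric
    assert (H2 * H2 - eye(2)).applyfunc(simplify) == zeros(2, 2)  # involution
    assert (H2 * v + v).applyfunc(simplify) == zeros(2, 1)        # normal is flipped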
def _compute_properties(self): """ Computes the macroscopic properties of the solution """ M_pmbr = Matrix([self.endmember_proportions]).T self.site_species_proportions = (self.endmember_site_occupancies.T * M_pmbr) A = self.endmember_site_occupancies.T M_alpha = Matrix([a for a in self.site_species_alphas]) self.endmember_alphas = simplify_matrix(np.array(A.T * M_alpha)) invalphap = Matrix([1. / a for a in self.endmember_alphas[:]]) B = diag(*M_alpha) * A * diag(*invalphap) try: B = simplify_matrix(np.array(B)) except TypeError: pass f = 2. / (np.einsum('i, j -> ij', self.site_species_alphas, np.ones(self.n_site_species)) + np.einsum('i, j -> ij', np.ones(self.n_site_species), self.site_species_alphas)) Wmod = emult(self.site_species_interactions, simplify_matrix(f)) Q = B.T * Wmod * B self.endmember_energies = Matrix(np.ones(self.n_mbrs)) self.endmember_interactions = Q[:, :] for i in range(self.n_mbrs): self.endmember_energies[i] = Q[i, i] * self.endmember_alphas[i] for j in range(i, self.n_mbrs): self.endmember_interactions[i, j] = ( (Q[i, j] + Q[j, i] - Q[i, i] - Q[j, j]) * (self.endmember_alphas[i] + self.endmember_alphas[j]) / 2) self.endmember_interactions[j, i] = 0 # Convert sympy objects to floats if possible # also normalise solution to self.n_sites normalise = symplify(self.n_sites) self.endmember_alphas /= normalise self.endmember_interactions *= normalise self.endmember_energies *= normalise self.endmember_alphas = vector_to_array(self.endmember_alphas) self.endmember_interactions = matrix_to_array( self.endmember_interactions) self.site_species_proportions = vector_to_array( self.site_species_proportions) self.endmember_energies = vector_to_array(self.endmember_energies)
def test_replace_arrays_partial_derivative(): x, y, z, t = symbols("x y z t") expr = PartialDerivative(A(i), A(j)) assert expr.replace_with_arrays({A(i): [x, y]}, [i, j]) == Array([[1, 0], [0, 1]]) expr = PartialDerivative(A(i), A(-i)) assert expr.replace_with_arrays({A(i): [x, y], L: diag(1, 1)}, []) == 2 assert expr.replace_with_arrays({A(i): [x, y], L: diag(1, -1)}, []) == 0 expr = PartialDerivative(A(-i), A(i)) assert expr.replace_with_arrays({A(i): [x, y], L: diag(1, 1)}, []) == 2 assert expr.replace_with_arrays({A(i): [x, y], L: diag(1, -1)}, []) == 0
def get_refl_transform(nx, ny): if DEBUG: print "Transforming normal:" print nx, ny nss = nx**2 + ny**2 v = Matrix([[nx, ny]]).T H2 = diag(1,1) - 2*v*v.T / nss H3 = diag(1,1,1) H3[0:2,0:2] = H2 return H3
def make_state_space_sym(num_signals, num_states, is_homo): mu_rho_dict_sym = {} state_state_sym_dict = {} mu_names = ['mu_'+str(i) for i in range(num_states)] rho_names = ['rho_'+str(i) for i in range(num_states)] mu_rho_names = mu_names + rho_names mu_names_sym = [sympy.Symbol(x) for x in mu_names] rho_names_sym = [sympy.Symbol(x) for x in rho_names] mu_rho_names_sym = [sympy.Symbol(x) for x in mu_rho_names] mu_rho_dict_sym.update(dict(zip(mu_rho_names, mu_rho_names_sym))) A_identity_sym = sympy.eye(num_states) A_rhoblock_sym = sympy.diag(*rho_names_sym) A_sym = sympy.diag(A_identity_sym, A_rhoblock_sym) D_sym_mu_part = sympy.ones(num_signals, num_states) D_sym_zeta_part = sympy.ones(num_signals, num_states) D_sym = D_sym_mu_part.row_join(D_sym_zeta_part) if is_homo: sigmas_signal_names = [ 'sigma_signal_'+str(i) for i in range(num_signals)] sigmas_state_names = ['sigma_state_'+str(i) for i in range(num_states)] sigmas_signal_sym = [sympy.Symbol(x) for x in sigmas_signal_names] sigmas_state_sym = [sympy.Symbol(x) for x in sigmas_state_names] C_nonsingularblock_sym = sympy.diag(*sigmas_state_sym) G_nonsingularblock_sym = sympy.diag(*sigmas_signal_sym) C_singularblock_sym = sympy.zeros(num_states, num_states) G_singularblock_sym = sympy.zeros(num_signals, num_states) G_sym = G_singularblock_sym.row_join(G_nonsingularblock_sym) C_sym = sympy.diag(C_singularblock_sym, C_nonsingularblock_sym) main_matrices_sym = { 'A_z': A_sym, 'C_z': C_sym, 'D_s': D_sym, 'G_s': G_sym} sub_matrices_sym = {'A_z_stable': A_rhoblock_sym, 'C_z_nonsingular': C_nonsingularblock_sym, 'G_s_nonsingular': G_nonsingularblock_sym} state_state_sym_dict.update(main_matrices_sym) state_state_sym_dict.update(sub_matrices_sym) return state_state_sym_dict
def hamilton(M):
    text = "Cayley-Hamilton method<br>"
    eigen_values, eigen_vectors = eigen(M)
    text += 'Eigenvalues: $' + sp.latex(eigen_values) + ' = ' + sp.latex(
        eigen_values.evalf(5)) + '$<br>'
    eigen_values = eigen_values.evalf(5)
    x = sp.symbols(
        str(['a' + str(i) for i in range(len(eigen_values))])[1:-1].replace("'", ""))
    if len(eigen_values) == 1:
        x = [x]
    A = sp.ones(len(x), 1)
    for i in range(1, len(x)):
        A = sp.Matrix(sp.BlockMatrix([A, sp.HadamardPower(eigen_values, i)]))
    b = sp.Matrix(sp.HadamardPower(sp.E, eigen_values * t))
    result = sp.Matrix(list(sp.linsolve((A, b), *x))[0])
    text += "$" + sp.latex(b) + ' = ' + sp.latex(A) + '\\bullet' + sp.latex(sp.Matrix(x)) + '\\implies' + \
        sp.latex(sp.Matrix(x)) + ' = ' + sp.latex(result) + "$<br>"
    s = sp.diag(*tuple([1] * len(x))) * result[0, 0]
    for i in range(1, len(x)):
        s += M * result[i, 0]
    text += "$e^{\\mathbb{A}t} = " + sp.latex(sp.simplify(s)) + "$<br><br>"
    return s, text
def Ernst(B=symbols("B"), M=symbols("M")): """ Black holes in a magnetic universe. J. Math. Phys., 17:54–56, 1976. Frederick J. Ernst. Parameters ---------- M : ~sympy.core.basic.Basic or int or float Mass of the black hole. Defaults to ``M``. B : ~sympy.core.basic.Basic or int or float The magnetic field strength Defaults to ``B``. """ coords = symbols("t r theta phi") t, r, th, ph = coords # Helper functions lambd = 1 + ((B * r * sin(th))**2) w = 1 - ((2 * M) / r) # define the metric metric = diag( -1 * (lambd**2) * w, (lambd**2) / w, ((r * lambd)**2), (((r * sin(th)) / lambd)**2), ).tolist() return MetricTensor(metric, coords, "ll")
def _calculate_local_remainder_inhomogeneous(self): r"""Calculate the non-quadratic remainder matrix :math:`W(x) = V(x) - U(x)` of the quadratic approximation matrix :math:`U(x)` of the potential's eigenvalue matrix :math:`\Lambda(x)`. This function is used for the inhomogeneous case. """ if self._remainder_eigen_ih_s is not None: # Calculation already done at some earlier time return else: self._remainder_eigen_ih_s = [] self.calculate_eigenvalues() self.calculate_jacobian() self.calculate_hessian() # Quadratic Taylor series for all eigenvalues quadratics = [] for index, eigenvalue in enumerate(self._eigenvalues_s): # Point q where the Taylor series is computed # This is a column vector q = (q1, ... ,qD) qs = [ sympy.Symbol("q" + str(i)) for i, v in enumerate(self._variables) ] pairs = [(xi, qi) for xi, qi in zip(self._variables, qs)] V = self._eigenvalues_s[index].subs(pairs) J = self._jacobian_s[index].subs(pairs) H = self._hessian_s[index].subs(pairs) # Symbolic expression for the quadratic Taylor expansion term xmq = sympy.Matrix([(xi - qi) for xi, qi in zip(self._variables, qs)]) quadratic = sympy.Matrix( [[V]]) + J.T * xmq + sympy.Rational(1, 2) * xmq.T * H * xmq try: quadratic = quadratic.applyfunc(sympy.simplify) except: pass quadratics.append(quadratic[0, 0]) # Symbolic expression for the Taylor expansion remainder term U = sympy.diag(*quadratics) remainder = self._potential_s - U # Symbolic simplification may fail if self._try_simplify: try: remainder = remainder.applyfunc(sympy.simplify) except: pass self._remainder_eigen_ih_s = remainder # Construct functions to evaluate the approximation at point q at the given nodes self._remainder_eigen_ih_n = tuple( sympy.lambdify(list(self._variables) + qs, entry, "numpy") for entry in remainder)
def get_submatrix(self, matrix): r""" Returns ======= macaulay_submatrix: Matrix The Macaulay denominator matrix. Columns that are non reduced are kept. The row which contains one of the a_{i}s is dropped. a_{i}s are the coefficients of x_i ^ {d_i}. """ reduced, non_reduced = self.get_reduced_nonreduced() # if reduced == [], then det(matrix) should be 1 if reduced == []: return diag([1]) # reduced != [] reduction_set = [v ** self.degrees[i] for i, v in enumerate(self.variables)] ais = list([self.polynomials[i].coeff(reduction_set[i]) for i in range(self.n)]) reduced_matrix = matrix[:, reduced] keep = [] for row in range(reduced_matrix.rows): check = [ai in reduced_matrix[row, :] for ai in ais] if True not in check: keep.append(row) return matrix[keep, non_reduced]
def get_hermitian_base(dim):
    """
    Get a complete Hermitian basis of a given dimension.

    dim (int): dimension of the Hermitian matrices
    return (list of Matrix): a list of Hermitian basis matrices
    """
    if dim == 2:
        return pauli_matrices
    elif dim == 4:
        return gamma_matrices
    elif dim == 8:
        return pg_matrices
    assert dim > 0
    base = [sp.diag(*[0] * i, 1, *[0] * (dim - i - 1)) for i in range(dim)]
    for row in range(dim):
        for col in range(row + 1, dim):
            basis = [[0 for i in range(dim)] for j in range(dim)]
            basis[row][col] = 1
            basis = sp.Matrix(basis)
            base.append(basis + basis.H)
            basis = [[0 for i in range(dim)] for j in range(dim)]
            basis[row][col] = sp.nsimplify("I")
            basis = sp.Matrix(basis)
            base.append(basis + basis.H)
    return base
def __construct_kinetics(self): """Construct model's dynamics (M, tau_c, tau_g). """ self.logger.debug('__construct_kinetics') # generate the mass matrix [6 x 6] for each body self.M = [ sp.diag(self.m[i], self.m[i], self.m[i], 0, 0, self.Iz[i]) for i in self.dim ] # dummy 0 for 1-based indexing self.M.insert(0, 0) # map spatial to generalized inertia self.M = [self.Jc[i].T * self.M[i] * self.Jc[i] for i in self.dim] # sum the mass product of each body self.M = reduce(lambda x, y: x + y, self.M) self.M = sp.trigsimp(self.M) # Coriolis matrix self.C = sp.trigsimp(coriolis_matrix(self.M, self.Q(), self.QDot())) # Coriolis forces self.tau_c = sp.trigsimp(self.C * sp.Matrix(self.QDot())) # potential energy due to gravity force self.V = 0 for i in self.dim: self.V = self.V + self.m[i] * self.g * self.xc[i][1] self.tau_g = sp.Matrix([sp.diff(self.V, x) for x in self.Q()])
def Kerr(c=constants.c, sch=symbols("r_s"), a=symbols("a")): """ Kerr Metric in Boyer Lindquist coordinates. Parameters ---------- c : ~sympy.core.basic.Basic or int or float Any value to assign to speed of light. Defaults to ``c``. sch : ~sympy.core.basic.Basic or int or float Any value to assign to Schwarzschild Radius of the central object. Defaults to ``r_s``. a : ~sympy.core.basic.Basic or int or float Spin factor of the heavy body. Usually, given by ``J/(Mc)``, where ``J`` is the angular momentum. Defaults to ``a``. """ coords = symbols("t r theta phi") t, r, theta, phi = coords Sigma = r**2 + (a**2 * cos(theta)**2) Delta = r**2 - sch * r + a**2 c2 = c**2 metric = diag( 1 - (sch * r / Sigma), -Sigma / (Delta * c2), -Sigma / c2, -((r**2 + a**2 + (sch * r * (a**2) * (sin(theta)**2) / Sigma)) * (sin(theta)**2)) / c2, ).tolist() metric[0][3] = metric[3][0] = sch * r * a * (sin(theta)**2) / (Sigma * c) return MetricTensor(metric, coords, "ll")
def A(self, p):
    nx = len(self.variables['x'])
    nmode = nx // 2
    sigma = p[:nmode]
    omega = p[nmode:nx]
    A_diag = ([[s, w], [-w, s]] for s, w in zip(sigma, omega))
    return sympy.diag(*A_diag)
def _generate_symbolic_relaxation_matrix(self): """ This function replaces the numbers in the relaxation matrix with symbols in this case, and returns also the subexpressions, that assign the number to the newly introduced symbol """ rr = [ self.relaxation_matrix[i, i] for i in range(self.relaxation_matrix.rows) ] unique_relaxation_rates = set() subexpressions = {} for relaxation_rate in rr: if relaxation_rate not in unique_relaxation_rates: relaxation_rate = sp.sympify(relaxation_rate) # special treatment for zero, sp.Zero would be an integer .. if isinstance(relaxation_rate, Zero): relaxation_rate = 0.0 if not isinstance(relaxation_rate, sp.Symbol): rt_symbol = sp.Symbol(f"rr_{len(subexpressions)}") subexpressions[relaxation_rate] = rt_symbol unique_relaxation_rates.add(relaxation_rate) new_rr = [ subexpressions[sp.sympify(e)] if sp.sympify(e) in subexpressions else e for e in rr ] substitutions = [ Assignment(e[1], e[0]) for e in subexpressions.items() ] return substitutions, sp.diag(*new_rr)
def JanisNewmanWinicour( c=constants.c, G=constants.G, gam=symbols("gam"), M=symbols("M") ): """ Reality of the Schwarzschild singularity. Phys. Rev. Lett., 20:878–880, 1968. A. I. Janis, E. T. Newman, and J. Winicour. Parameters ---------- M : ~sympy.core.basic.Basic or int or float Mass parameter, this is used for defining the schwarzschild metric. Defaults to ``M``. gam : ~sympy.core.basic.Basic or int or float Parameter for scaling Schwarzschild radius, for gamma=1 this will return the Schwarzschild metric Defaults to ``gam``. """ coords = symbols("t r theta phi") t, r, th, ph = coords # Helper functions r_s = (2 * G * M) / (c ** 2) alpha = 1 - (r_s / (gam * r)) # define the metric metric = diag( -1 * (alpha ** gam), (alpha ** -gam) / (c ** 2), (r ** 2) * (alpha ** (-gam + 1)), (r ** 2) * (alpha ** (-gam + 1)) * (sin(th) ** 2), ).tolist() return MetricTensor(metric, coords, "ll", name="JanisNewmanWinicourMetric")
def get_random_A(dim=3, valid=True, diagonal=True, ints=True, max_val=1e+3): """ :param dim: dimension of A :param valid: True iff A's eigenvalues are all negative :param diagonal: if True, A will be diagonal :param ints: A will have only integer elements :param max_val: max value of A if diagonal, else the square of the max value :return: A """ max_val = int(max_val) sgn = -1 if valid else 1 r = random.randint if ints else random.uniform diag = set() # generate n unique numbers for x in itertools.takewhile(lambda x: len(diag) < dim, _grand(r, 1, max_val, int=ints)): diag.add(sgn * x) D = sp.diag(*diag) if not diagonal: M = sp.Matrix([[sp.nan]]) while any(x == sp.nan for x in M): # avoid nan (due to X) X = sp.randMatrix(dim, dim, min=-max_val, max=max_val, seed=int(time.time() * 1000)) # eigenvals from D: det(XDX^-1 -tI) = det(X(D-tI)X^-1) = det(D-tI) M = X * D * (X ** -1) return M else: return D # diagonal matrix with -r values
def matrix_decomposition(A: sp.Matrix):
    """
    Decompose a matrix

    Parameters
    ----------
    A : Matrix
        3 x 3 matrix to decompose.

    Returns
    -------
    Tuple[Matrix, Matrix, Matrix, Matrix]
        Symmetric, anti-symmetric, hydrostatic, and deviatoric matrix parts.
        A_sym, A_anti, A_hydro, A_deviator
    """
    A_sym = sp.zeros(*A.shape)
    A_anti = sp.zeros(*A.shape)
    for i in range(3):
        for j in range(3):
            A_sym[i, j] = (A[i, j] + A[j, i]) / 2
            A_anti[i, j] = (A[i, j] - A[j, i]) / 2
    A_hydro = A.trace() / 3
    A_hydro = sp.diag(*[A_hydro] * 3)
    A_deviator = A_sym - A_hydro
    return A_sym, A_anti, A_hydro, A_deviator
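# A small usage sketch for matrix_decomposition above (assumes `import sympy as
# sp` as in the surrounding module): decompose a generic symbolic 3x3 matrix
# and check that the pieces recombine as documented and that the deviatoric
# part is trace-free.  `_demo_matrix_decomposition` is an illustrative name only.
def _demo_matrix_decomposition():
    import sympy as sp
    A = sp.Matrix(sp.symarray('a', (3, 3)))
    A_sym, A_anti, A_hydro, A_dev = matrix_decomposition(A)
    assert A_sym + A_anti == A        # symmetric + anti-symmetric parts
    assert A_hydro + A_dev == A_sym   # hydrostatic + deviatoric parts
    assert sp.simplify(A_dev.trace()) == 0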
def _generate_schwarzschild():
    coords = symbols("t r theta phi", real=True)
    t, r, th, ph = coords
    schw = diag(1 - 1 / r, -1 / (1 - 1 / r), -r ** 2, -r ** 2 * sin(th) ** 2)
    g = Metric("g", coords, schw)
    mu, nu = indices("mu nu", g)
    return (coords, t, r, th, ph, schw, g, mu, nu)
def _generate_minkowski():
    coords = symbols("t x y z", real=True)
    t, x, y, z = coords
    mink = diag(1, -1, -1, -1)
    eta = Metric("eta", coords, mink)
    mu, nu = indices("mu nu", eta)
    return (coords, t, x, y, z, mink, eta, mu, nu)
def shift_matrices(matrix): """Create two matrices from input matrix (append row of ones to the beginning of one and the end of the other) in order to find the change between rows (years) Args: matrix (Symbolic Matrix): any matrix Returns: shift_term (Symbolic Matrix): matrix with rows shifted down long_term (Symbolic Matrix): matrix with row appended to end (so that the dimensions will match shift term) TODO: Is a row of ones the best way to do this? """ print('matrix shape', matrix.shape) n = matrix.shape[0] # Length shift = -1 ones_V = [sp.Matrix([0, 1])] + [1] * (n-np.abs(shift) - 1) subdiag = sp.diag(*ones_V) subdiag = subdiag.col_insert(subdiag.shape[1], sp.zeros(subdiag.shape[0], 1)) sp.pprint(subdiag) shift_term = subdiag * matrix return shift_term, matrix
def force_definite(Q):
    eigvectors, eigvalues = Q.diagonalize()
    # Q = eigvectors * eigvalues * eigvectors.inv()
    # Alternative (not used): clip negative eigenvalues to zero:
    # fixed_eigvalues = [eigvalues[i, i] if eigvalues[i, i] > 0 else 0 for i in range(Q.shape[0])]
    # Instead, increase all eigenvalues by the same amount so that the smallest becomes zero.
    fixed_eigvalues = [eigvalues[i, i] - min(eigvalues) for i in range(Q.shape[0])]
    fixed_eigvalues = sympy.diag(*fixed_eigvalues)
    return eigvectors * fixed_eigvalues * eigvectors.inv()
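# A small usage sketch for force_definite above (assumes `import sympy` as in
# the surrounding module): shifting the spectrum of a symmetric matrix with a
# negative eigenvalue so that the smallest eigenvalue becomes zero.  The name
# `_demo_force_definite` is illustrative only.
def _demo_force_definite():
    import sympy
    Q = sympy.Matrix([[0, 2], [2, 0]])    # eigenvalues -2 and 2
    Q_psd = force_definite(Q)
    # The whole spectrum is shifted by +2, so the result equals Q + 2*I ...
    assert Q_psd == Q + 2 * sympy.eye(2)
    # ... and every eigenvalue of the shifted matrix is non-negative.
    assert all(ev >= 0 for ev in Q_psd.eigenvals())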
def are_lc_equiv(g1, g2): """ Tests whether two graphs are equivalent up to local complementation. If True, also returns every unitary such that |g2> = U|g1>. """ # Gets adjacency matrices and returns false if differing bases am1, k1 = get_adjacency_matrix(g1) am2, k2 = get_adjacency_matrix(g2) dim1, dim2 = len(k1), len(k2) if k1 != k2 or am1.shape != (dim1, dim1) or am2.shape != (dim2, dim2): return False, None # Defines binary matrices Id = sp.eye(dim1) S1 = sp.Matrix(am1).col_join(Id) S2 = sp.Matrix(am2).col_join(Id) # Defines symbolic variables A = sp.symbols('a:' + str(dim1), bool=True) B = sp.symbols('b:' + str(dim1), bool=True) C = sp.symbols('c:' + str(dim1), bool=True) D = sp.symbols('d:' + str(dim1), bool=True) # Defines solution matrix basis abcd = flatten(list(zip(A, B, C, D))) no_vars = len(abcd) no_qubits = no_vars // 4 # Creates symbolic binary matrix A, B, C, D = sp.diag(*A), sp.diag(*B), sp.diag(*C), sp.diag(*D) Q = A.row_join(B).col_join(C.row_join(D)) P = sp.zeros(dim1).row_join(Id).col_join(Id.row_join(sp.zeros(dim1))) # Constructs matrix to solve X = [i for i in S1.T * Q.T * P * S2] X = np.array([[x.coeff(v) for v in abcd] for x in X], dtype=int) # Removes any duplicated and all-zero rows X = np.unique(X, axis=0) X = X[~(X == 0).all(1)] # Finds the solutions (the nullspace of X) V = list(GF2nullspace(X)) if len(V) > 4: V = [(v1 + v2) % 2 for v1, v2 in it.combinations(V, 2)] else: V = [sum(vs) % 2 for vs in powerset(V)] V = [np.reshape(v, (no_qubits, 4)) for v in V] V = [v for v in V if all((a * d + b * c) % 2 == 1 for a, b, c, d in v)] if V: V = [[bin2gate[tuple(r)] for r in v] for v in V] return True, V else: return False, None
def __create_symbolic_jacobian_matrix(self):
    self.__mu_vec = [
        sympy.symbols('v_' + str(i + 1)) for i in self.__edges_true_outflow
    ]
    diag_mu = sympy.diag(*self.__mu_vec)
    self.__symbolic_J = self.__S_to * diag_mu * self.__Y_r.T
def _init_matrixes(self): self.X = sm.zeros(rows=self.n_e, cols=1) for edge_index, (edge, edge_info) in enumerate(sorted(self.edges.items(), key=lambda edge_sort_key: int(edge_sort_key[0][0]))): edge_index = list(self.edges()).index(edge) self.X[edge_index] = edge_info['variable_x'] self.A = nx.incidence_matrix(self, oriented=True).todense() self.Q_symbols = sm.Matrix(deepcopy(self.Q)) self.Q = self.A * self.X self.AF = deepcopy(self.A) self.AL = deepcopy(self.A) for row in range(self.AF.shape[0]): for col in range(self.AF.shape[1]): if self.AF[row, col] == -1: self.AF[row, col] = 0 if self.AL[row, col] == 1: self.AL[row, col] = 0 self.DF = sm.zeros(len(self.edges()), 1) self.DL = sm.zeros(len(self.edges()), 1) for equation_index, (edge, edge_info) in enumerate(sorted(self.edges.items(), key=lambda edge_sort_key: edge_sort_key[0][0])): u = edge[0] v = edge[1] p_s = self.nodes[u]['variable_p'] p_f = self.nodes[v]['variable_p'] a = edge_info['A'] eq = sm.sqrt((p_s ** 2 - p_f ** 2) / a) self.DF[equation_index] = sm.diff(eq, p_s) self.DL[equation_index] = -sm.diff(eq, p_f) self.DF = sm.diag(*self.DF) self.DL = sm.diag(*self.DL) self.P = sm.Matrix(self.P) self.Q = sm.Matrix(self.Q) self.dP = sm.Matrix(self.dP) self.dQ = sm.Matrix(self.dQ)
def diag(x):
    def m(i, j):
        return x[i, i]
    try:
        a = Matrix(x.cols, 1, m)
    except:
        a = sympy.diag(*x)
    return a
def _sympy_diag(args, sargs):
    vecmat = args[0]
    if vecmat.returnType().type in ['N', 'V']:
        return sympy.diag(*sargs[0])
    elif vecmat.returnType().type == 'M':
        raise NotImplementedError("TODO: Get diagonal of matrix in Sympy")
    else:
        raise ValueError("Unknown return type from symbolic Expression")
def __add__(core1, core2): """ Add core1 and core2 and return a new core. Every vectors are concatenated and structure matrices with same labels are diagonally stacked into a big (square) structure matrix. """ assert set(core1.symbs_names) == set(core2.symbs_names) core = Core(label=core1.label) # Concatenate lists of symbols for name in core1.symbs_names: attr1 = getattr(core1, name) attr2 = getattr(core2, name) core.setsymb(name, attr1 + attr2) for vari in core.dims.names: for varj in core.dims.names: Mij1 = getattr(core1, 'M'+vari+varj)() Mij2 = getattr(core2, 'M'+vari+varj)() Mij = types.matrix_types[0](sympy.diag(Mij1, Mij2)) if all(dim > 0 for dim in Mij.shape): set_func = getattr(core, 'set_M'+vari+varj) set_func(Mij) # Concatenate lists of symbols for name in core1.symbs_names: attr1 = getattr(core1, name) attr2 = getattr(core2, name) core.setsymb(name, attr1 + attr2) # Update subs disctionary core.subs = {} core.subs.update(core1.subs) core.subs.update(core2.subs) # Update observers dictionary core.observers.update(core1.observers) core.observers.update(core2.observers) # Set Hamiltonian expression core.setexpr('H', core1.H + core2.H) # Concatenate lists of expressions core.setexpr('z', core1.z + core2.z) core.connectors = core1.connectors + core2.connectors core.force_wnl = core1.force_wnl + core2.force_wnl for vari in core.dims.names: for varj in core.dims.names: Mij = getattr(core, 'M'+vari+varj)() if all(dim > 0 for dim in Mij.shape): set_func = getattr(core, 'set_M'+vari+varj) set_func(Mij) return core
def Lambda(self):
    """Diagonal matrix of eigenvalues."""
    # Perhaps faster to use diagonalize
    # E, L = self.A.diagonalize()
    # return L
    e = self.eigenvalues
    return sMatrix(sym.diag(*e))
def bravyi_haah_gmat(k):
    """Tri-orthogonal matrix from Bravyi-Haah
    """
    bigl = np.block([[LMAT] for _ in range(k)] + [[S1MAT]])
    bigm = sympy.diag(*[MMAT for _ in range(k)])
    bigs2 = np.block([S2MAT for _ in range(k)])
    gmatr = np.block([[bigm], [bigs2]])
    gmat = np.block([bigl, gmatr])
    return sympy.Matrix(gmat)
def _calculate_local_remainder_component(self, diagonal_component=None): r"""Calculate the non-quadratic remainder :math:`W(x) = V(x) - U(x)` of the quadratic Taylor approximation :math:`U(x)` of the potential's eigenvalue :math:`\lambda_i(x)`. Note that this function is idempotent. :param diagonal_component: Specifies the index :math:`i` of the eigenvalue :math:`\lambda_i` that gets expanded into a Taylor series :math:`u_i`. """ # Calculation already done at some earlier time? if self._remainder_eigen_s.has_key(diagonal_component): return self.calculate_eigenvalues() self.calculate_jacobian() self.calculate_hessian() # Point q where the taylor series is computed # This is a column vector q = (q1, ... ,qD) qs = [sympy.Symbol("q" + str(i)) for i in xrange(len(self._variables))] pairs = [(xi, qi) for xi, qi in zip(self._variables, qs)] V = self._eigenvalues_s[diagonal_component].subs(pairs) J = self._jacobian_s[diagonal_component].subs(pairs) H = self._hessian_s[diagonal_component].subs(pairs) # Symbolic expression for the quadratic Taylor expansion term xmq = sympy.Matrix([(xi - qi) for xi, qi in zip(self._variables, qs)]) quadratic = sympy.Matrix( [[V]]) + J.T * xmq + sympy.Rational(1, 2) * xmq.T * H * xmq # Symbolic simplification may fail if self._try_simplify: try: quadratic = quadratic.applyfunc(sympy.simplify) except: pass # Symbolic expression for the Taylor expansion remainder term U = sympy.diag(*self._number_components * [quadratic[0, 0]]) remainder = self._potential_s - U # Symbolic simplification may fail if self._try_simplify: try: remainder = remainder.applyfunc(sympy.simplify) except: pass self._remainder_eigen_s[diagonal_component] = remainder # Construct functions to evaluate the approximation at point q at the given nodes # The variable ordering in lambdify is [x1, ..., xD, q1, ...., qD] self._remainder_eigen_n[diagonal_component] = tuple([ sympy.lambdify(list(self._variables) + qs, entry, "numpy") for entry in remainder ])
def __init__(self, label, nodes, **kwargs): pars = ['Is', 'betaR', 'betaF', 'Vt', 'mu', 'Rb', 'Rc', 'Re'] for par in pars: assert par in kwargs.keys() Is, betaR, betaF, Vt, mu, Rb, Rc, Re = symbols(pars) # dissipation variable wbjt = symbols(["w"+label+ind for ind in ['bc', 'be']]) # bjt dissipation funcion coeffs = types.matrix_types[0]([[(betaR+1)/betaR, -1], [-1, (betaF+1)/betaF]]) funcs = [Is*(sympy.exp(wbjt[0]/(mu*Vt))-1) + GMIN*wbjt[0], Is*(sympy.exp(wbjt[1]/(mu*Vt))-1) + GMIN*wbjt[1]] zbjt = coeffs*types.matrix_types[0](funcs) # bjt edges data data_bc = {'label': wbjt[0], 'type': 'dissipative', 'ctrl': 'e', 'z': {'e_ctrl': zbjt[0], 'f_ctrl': sympy.sympify(0)}, 'link': None} data_be = {'label': wbjt[1], 'type': 'dissipative', 'z': {'e_ctrl': zbjt[1], 'f_ctrl': sympy.sympify(0)}, 'ctrl': 'e', 'link': None} # connector resistances dissipative functions wR = symbols(["w"+label+ind for ind in ['rb', 'rc', 're']]) Rmat = types.matrix_types[0](sympy.diag(Rb, Rc, Re)) zR = Rmat*types.matrix_types[0](wR) # connector resistances edges data data_rb = {'label': wR[0], 'z': {'e_ctrl': wR[0]/Rb, 'f_ctrl': Rb*wR[0]}, 'type': 'dissipative', 'ctrl': '?', 'link': None} data_rc = {'label': wR[1], 'z': {'e_ctrl': wR[1]/Rc, 'f_ctrl': Rc*wR[1]}, 'type': 'dissipative', 'ctrl': '?', 'link': None} data_re = {'label': wR[2], 'z': {'e_ctrl': wR[2]/Re, 'f_ctrl': Re*wR[2]}, 'type': 'dissipative', 'ctrl': '?', 'link': None} # edge Nb, Nc, Ne = nodes iNb, iNc, iNe = [str(el)+label for el in (Nb, Nc, Ne)] edges = [(iNb, iNc, data_bc), (iNb, iNe, data_be), (Nb, iNb, data_rb), (Nc, iNc, data_rc), (Ne, iNe, data_re)] # init component DissipativeNonLinear.__init__(self, label, edges, wbjt + wR, list(zbjt) + list(zR), **kwargs)
def _age_vector_dens(u, B, Qt):
    """Return the (symbolic) probability density vector of the compartment ages.

    Args:
        u (SymPy dx1-matrix): external input vector
        B (SymPy dxd-matrix): compartment matrix
        Qt (SymPy dxd-matrix): Qt = :math:`e^{t\\,B}`

    Returns:
        SymPy dx1-matrix: probability density vector of the compartment ages
            :math:`f_a(y) = (X^\\ast)^{-1}\\,e^{y\\,B}\\,u`
    """
    xss = -(B**-1) * u
    X = diag(*xss)
    return (X**-1) * Qt * u
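# A small consistency check for _age_vector_dens above (assumes the sympy
# names used in this module are importable): for a single compartment with
# decay rate k and constant input r, the steady-state age density reduces to
# k*exp(-k*y), which integrates to one.  `_demo_age_vector_dens` is an
# illustrative name only.
def _demo_age_vector_dens():
    from sympy import symbols, Matrix, exp, integrate, simplify, oo
    k, r, y = symbols('k r y', positive=True)
    u = Matrix([r])                 # constant input
    B = Matrix([-k])                # 1x1 compartment matrix
    Qy = Matrix([exp(-k * y)])      # e^{y B} for the scalar case
    dens = _age_vector_dens(u, B, Qy)
    assert simplify(dens[0] - k * exp(-k * y)) == 0
    assert integrate(dens[0], (y, 0, oo)) == 1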
def connect(self): """ Effectively connect inputs and outputs defined in core.connectors. See also -------- See help of method :code:`core.add_connector` for details. """ all_alpha = list() # recover connectors and sort cy and cu for i, c in enumerate(self.connectors): all_alpha.append(c['alpha']) i_primal = getattr(self, 'cy').index(c['y'][0]) self.move_connector(i_primal, 2*i) i_dual = getattr(self, 'cy').index(c['y'][1]) self.move_connector(i_dual, 2*i+1) Mswitch_list = [alpha * sympy.Matrix([[0, -1], [1, 0]]) for alpha in all_alpha] Mswitch = types.matrix_types[0](sympy.diag(*Mswitch_list)) nxwy = self.dims.x() + self.dims.w() + self.dims.y() # Gain matrix G_connectors = self.M[:nxwy, nxwy:] # Observation matrix O_connectors = self.M[nxwy:, :nxwy] N_connectors = types.matrix_types[0](sympy.eye(self.dims.cy()) - self.Mcycy() * Mswitch) try: iN_connectors = inverse(N_connectors, dosimplify=False) # Interconnection Matrix due to the connectors M_connectors = G_connectors*Mswitch*iN_connectors*O_connectors # Store new structure self.M = types.matrix_types[0](self.M[:nxwy, :nxwy] + M_connectors) # clean setattr(self, 'cy', list()) setattr(self, 'cu', list()) setattr(self, 'connectors', list()) except ValueError: raise Exception('Can not resolve the connection.\n\nABORD')
def absorbing_jump_chain(self):
    """Return the absorbing jump chain as a discrete-time Markov chain.

    The generator of the absorbing chain is just given by :math:`B`, which
    allows the computation of the transition probability matrix :math:`P`
    from :math:`B=(P-I)\\,D` with :math:`D` being the diagonal matrix with
    diagonal entries taken from :math:`-B`.

    Returns:
        :class:`~.DTMC.DTMC`: :class:`DTMC` (beta, P)
    """
    # B = (P - I) * D
    d = self.B.rows
    D = diag(*[-self.B[j, j] for j in range(d)])
    P = self.B * D**(-1) + eye(d)
    return DTMC(self.beta, P)
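# A plain-sympy sketch of the identity used in absorbing_jump_chain above,
# independent of the DTMC class: recover the jump-chain transition matrix P
# from B = (P - I) * D, where D carries the diagonal of -B, for a small
# concrete compartment matrix.  `_demo_jump_chain_identity` is an illustrative
# name only.
def _demo_jump_chain_identity():
    from sympy import Matrix, Rational, diag, eye
    B = Matrix([[-3, 1],
                [2, -2]])
    d = B.rows
    D = diag(*[-B[j, j] for j in range(d)])   # diag(3, 2)
    P = B * D**-1 + eye(d)
    assert P == Matrix([[0, Rational(1, 2)],
                        [Rational(2, 3), 0]])
    assert (P - eye(d)) * D == B              # the defining identity B = (P - I) * D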
def _age_vector_nth_moment(u, B, n):
    """Return the (symbolic) vector of nth moments of the compartment ages.

    Args:
        u (SymPy dx1-matrix): external input vector
        B (SymPy dxd-matrix): compartment matrix
        n (positive int): order of the moment

    Returns:
        SymPy dx1-matrix: vector of nth moments of the compartment ages
            :math:`\\mathbb{E}[a^n]=(-1)^n\\,n!\\,(X^\\ast)^{-1}\\,B^{-n}\\,x^\\ast`

    See Also:
        :func:`_age_vector_exp`: Return the (symbolic) vector of expected values
            of the compartment ages.
    """
    xss = -(B**-1) * u
    X = diag(*xss)
    return (-1)**n * factorial(n) * (X**-1) * (B**-n) * xss
def _age_vector_cum_dist(u, B, Qt):
    """Return the (symbolic) cumulative distribution function vector of the compartment ages.

    Args:
        u (SymPy dx1-matrix): external input vector
        B (SymPy dxd-matrix): compartment matrix
        Qt (SymPy dxd-matrix): Qt = :math:`e^{t\\,B}`

    Returns:
        SymPy dx1-matrix: cumulative distribution function vector of the compartment ages
            :math:`F_a(y)=(X^\\ast)^{-1}\\,B^{-1}\\,(e^{y\\,B}-I)\\,u`
    """
    d = B.rows
    xss = -(B**-1) * u
    X = diag(*xss)
    return (X**-1) * (B**-1) * (Qt - eye(d)) * u
def compute_vect2_from_pixel(self): # Parameters related to Mirror2: self.k2, self.c2, self.d = symbols("k_2, c_2, d", real=True, positive=True) self.u2, self.v2 = symbols("u_2, v_2", real=True, nonnegative=True) # Viewpoint (focus) position vector: self.f2x, self.f2y, self.f2z = symbols("x_f_2, y_f_2, z_f_2", real=True) # (Coaxial alignment assumption) # where self.f2x = 0 self.f2y = 0 self.f2z = self.d - self.c2 self.f2 = Matrix([self.f2x, self.f2y, self.f2z]) # Reflex mirror's normal vector wrt [C] self.n_ref = Matrix([0, 0, -1]) # Virtual focus self.f2virt = Matrix([self.f2x, self.f2y, self.d]) # where f2virtz = d # Pixel vector self.m2h = Matrix([self.u2, self.v2, 1]) # Point in the normalized projection plane self.q2 = self.Kc_inv * self.m2h # Planar Reflection Matrix (Change of coordinates) self.M_ref = (eye(3) + 2 * diag(*self.n_ref)).row_join(self.f2virt) # Change coordinates from C' to C (according to explanation in journal paper) self.q2v = self.M_ref * self.q2.col_join(eye(1)) # Point in mirror wrt C self.t2 = self.c2 / (self.k2 - self.q2.norm() * sqrt(self.k2 * (self.k2 - 2))) self.p2 = self.f2virt + self.t2 * (self.q2v - self.f2virt) self.p2h = self.p2.col_join(eye(1)) # Transform matrix from C to F1 frame self.T2_CtoF2 = eye(3).row_join(-self.f2) # Direction vector self.d2_vect = self.T2_CtoF2 * self.p2h return self.d2_vect
def __init__(self, equations, u_x_names=[], u_y_names=[], u_z_names=[], x_names=[], w_names=[], param_names=[], block_indices={}, par_to_values_dict={}, fwd_shift_idx=[], aux_eqs=[], vars_initvalues_dict={}, u_trans_dict={}, ss_sol_dict={}): print u_x_names print u_y_names print u_z_names print x_names print w_names ModelBase.__init__(self, equations, x_names, w_names, param_names, par_to_values_dict=par_to_values_dict, vars_initvalues_dict=vars_initvalues_dict, compute_ss=False) u_x_tp1_sym_d = make_basic_sym_dic(u_x_names, 'tp1') u_x_t_sym_d = make_basic_sym_dic(u_x_names, 't') u_x_tm1_sym_d = make_basic_sym_dic(u_x_names, 'tm1') u_y_tp1_sym_d = make_basic_sym_dic(u_y_names, 'tp1') u_y_t_sym_d = make_basic_sym_dic(u_y_names, 't') u_z_tp1_sym_d = make_basic_sym_dic(u_z_names, 'tp1') u_z_t_sym_d = make_basic_sym_dic(u_z_names, 't') u_w_tp1_sym_d = make_basic_sym_dic(w_names, 'tp1') u_w_t_sym_d = make_basic_sym_dic(w_names, 't') self.u_x_tp1_sym = list(sympy.ordered(u_x_tp1_sym_d.values())) self.u_x_t_sym = list(sympy.ordered(u_x_t_sym_d.values())) self.u_x_tm1_sym = list(sympy.ordered(u_x_tm1_sym_d.values())) self.u_y_tp1_sym = list(sympy.ordered(u_y_tp1_sym_d.values())) self.u_y_t_sym = list(sympy.ordered(u_y_t_sym_d.values())) self.u_z_tp1_sym = list(sympy.ordered(u_z_tp1_sym_d.values())) self.u_z_t_sym = list(sympy.ordered(u_z_t_sym_d.values())) self.u_w_tp1_sym = list(sympy.ordered(u_w_tp1_sym_d.values())) self.u_w_t_sym = list(sympy.ordered(u_w_t_sym_d.values())) if fwd_shift_idx != []: equations = self.shift_eqns_fwd(equations, fwd_shift_idx) new_eqs = [x.subs(u_trans_dict) for x in equations] new_eqs.extend(aux_eqs) equations = new_eqs if ss_sol_dict == {}: eqns_no_param = self.make_ss_version_of_eqs(equations) self.ss_solutions_dict = get_sstate_sol_dict_from_sympy_eqs( eqns_no_param, self.x_in_ss_sym, vars_initvalues_dict=vars_initvalues_dict) self.ss_residuals = [x.subs(self.ss_solutions_dict) for x in eqns_no_param] self.block_indices = block_indices # print block_indices non_expec_idx = block_indices['non_expectational_block'] expec_idx = block_indices['expectational_block'] z_idx = block_indices['z_block'] if x_names == []: x_names = u_x_names + u_y_names + u_z_names self.u_param_sym = self.param_sym_d.values() self.u_param_sym = list(sympy.ordered(self.u_param_sym)) self.eqns = equations print "self.eqns" print self.eqns if isinstance(non_expec_idx, int): self.eqns_non_expec = [equations[non_expec_idx]] else: self.eqns_non_expec = [equations[i] for i in non_expec_idx] if isinstance(expec_idx, int): self.eqns_expec = [equations[expec_idx]] else: self.eqns_expec = [equations[i] for i in expec_idx] if isinstance(z_idx, int): self.eqns_z = [equations[z_idx]] else: self.eqns_z = [equations[i] for i in z_idx] print "\nself.eqns_non_expec" print self.eqns_non_expec print "\nself.eqns_expec" print self.eqns_expec print "\nself.eqns_z" print self.eqns_z self.x_to_devss_dict = make_devss_subs_dict(self.x_names, self.x_dates) self.eqns_expec_devss = [x.subs(self.x_to_devss_dict) for x in self.eqns_expec] self.eqns_non_expec_devss = [x.subs(self.x_to_devss_dict) for x in self.eqns_non_expec] self.eqns_z_devss = [x.subs(self.x_to_devss_dict) for x in self.eqns_z] print "\nself.eqns_non_expec_devss" print self.eqns_non_expec_devss print "\nself.eqns_expec_devss" print self.eqns_expec_devss print "\nself.eqns_z_devss" print self.eqns_z_devss self.jacobians_unev, self.jacobians_unev_ss = self.jacobians_sym_uh() self.uA_sym = self.jacobians_unev[0] self.uB_sym = self.jacobians_unev[1] 
self.uC_sym = self.jacobians_unev[2] self.uD_sym = self.jacobians_unev[3] self.uF_sym = self.jacobians_unev[4] self.uG_sym = self.jacobians_unev[5] self.uH_sym = self.jacobians_unev[6] self.uJ_sym = self.jacobians_unev[7] self.uK_sym = self.jacobians_unev[8] self.uL_sym = self.jacobians_unev[9] self.uM_sym = self.jacobians_unev[10] self.uN_sym = self.jacobians_unev[11] self.uA_sym_ss = self.jacobians_unev_ss[0] self.uB_sym_ss = self.jacobians_unev_ss[1] self.uC_sym_ss = self.jacobians_unev_ss[2] self.uD_sym_ss = self.jacobians_unev_ss[3] self.uF_sym_ss = self.jacobians_unev_ss[4] self.uG_sym_ss = self.jacobians_unev_ss[5] self.uH_sym_ss = self.jacobians_unev_ss[6] self.uJ_sym_ss = self.jacobians_unev_ss[7] self.uK_sym_ss = self.jacobians_unev_ss[8] self.uL_sym_ss = self.jacobians_unev_ss[9] self.uM_sym_ss = self.jacobians_unev_ss[10] self.uN_sym_ss = self.jacobians_unev_ss[11] args = self.x_in_ss_sym + self.u_param_sym self.jac_ss_funcs = sympy.lambdify(args, self.jacobians_unev_ss) x_ss_num = [x.subs(self.ss_solutions_dict) for x in self.x_in_ss_sym] par_ss_num = [x.subs(self.par_to_values_dict) for x in self.param_sym] x_par_ss_num = x_ss_num + par_ss_num self.jac_ss_num = [matrix2numpyfloat(x) for x in self.jac_ss_funcs(*x_par_ss_num)] # # print '\nself.jac_ss_num' # print self.jac_ss_num self.uA_num_ss = self.jac_ss_num[0] self.uB_num_ss = self.jac_ss_num[1] self.uC_num_ss = self.jac_ss_num[2] self.uD_num_ss = self.jac_ss_num[3] # print self.uD_sym # print self.uD_sym_ss # print self.uD_num_ss self.uF_num_ss = self.jac_ss_num[4] self.uG_num_ss = self.jac_ss_num[5] self.uH_num_ss = self.jac_ss_num[6] self.uJ_num_ss = self.jac_ss_num[7] self.uK_num_ss = self.jac_ss_num[8] self.uL_num_ss = self.jac_ss_num[9] self.uM_num_ss = self.jac_ss_num[10] self.uN_num_ss = self.jac_ss_num[11] self.u_x_ss_sym = [x.subs(self.normal_and_0_to_ss) for x in self.u_x_t_sym] self.u_y_ss_sym = [x.subs(self.normal_and_0_to_ss) for x in self.u_y_t_sym] self.u_z_ss_sym = [x.subs(self.normal_and_0_to_ss) for x in self.u_z_t_sym] self.u_x_ss_num = [x.subs(self.ss_solutions_dict) for x in self.u_x_ss_sym] self.u_y_ss_num = [x.subs(self.ss_solutions_dict) for x in self.u_y_ss_sym] self.u_z_ss_num = [x.subs(self.ss_solutions_dict) for x in self.u_z_ss_sym] # self.u_z_ss_num = [0.03, 0.03, 1] # self.u_z_ss_num = [0.03, 1] non_zero_z_idx = np.nonzero(self.u_z_ss_num) z_for_diag = np.ones_like(self.u_z_ss_num) z_for_diag[non_zero_z_idx] = self.u_z_ss_num print "foooooo\n" print self.u_z_ss_num print non_zero_z_idx print z_for_diag print "moooooo\n" self.di_u_x_ss_sym = sympy.diag(*self.u_x_ss_sym) self.di_u_y_ss_sym = sympy.diag(*self.u_y_ss_sym) self.di_u_z_ss_sym = sympy.diag(*self.u_z_ss_sym) self.di_u_x_ss_num = np.diag(self.u_x_ss_num) self.di_u_y_ss_num = np.diag(self.u_y_ss_num) self.di_u_z_ss_num = np.diag(z_for_diag) self.uA_num_ss_log = np.dot(self.uA_num_ss, self.di_u_x_ss_num) self.uB_num_ss_log = np.dot(self.uB_num_ss, self.di_u_x_ss_num) self.uC_num_ss_log = np.dot(self.uC_num_ss, self.di_u_y_ss_num) self.uD_num_ss_log = np.dot(self.uD_num_ss, self.di_u_z_ss_num) self.uF_num_ss_log = np.dot(self.uF_num_ss, self.di_u_x_ss_num) self.uG_num_ss_log = np.dot(self.uG_num_ss, self.di_u_x_ss_num) self.uH_num_ss_log = np.dot(self.uH_num_ss, self.di_u_x_ss_num) self.uJ_num_ss_log = np.dot(self.uJ_num_ss, self.di_u_y_ss_num) self.uK_num_ss_log = np.dot(self.uK_num_ss, self.di_u_y_ss_num) self.uL_num_ss_log = np.dot(self.uL_num_ss, self.di_u_z_ss_num) self.uM_num_ss_log = np.dot(self.uM_num_ss, self.di_u_z_ss_num) 
self.uN_num_ss_log = np.dot(self.uN_num_ss, self.di_u_z_ss_num)
Omicron = Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 0, 0, 1]]) D = Matrix([[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1]]) # Multiplicative component of natural law multiplicative_components = [0, 0, 0, 0, 0] Upsilon = diag(*multiplicative_components) # Additive component of natural law additive_components = [0, 1, 0, 0, 0] S = diag(*additive_components) # Accelerator Alpha = Matrix([[1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0]]) number_of_matters = 3 number_of_atoms = 4
def bend(pts, facets): edges = {} for f in range(0, len(facets)): edges[f] = [] vertices = facets[f] for v in range(0, len(vertices) - 1): edges[f].append([vertices[v], vertices[v + 1]]) edges[f].append([vertices[len(vertices) - 1], vertices[0]]) if DEBUG: print "Edges:" print edges def edges_eq(e1, e2): if e1 == e2 or (e1[0] == e2[1] and e1[1] == e2[0]): return True return False def common_edge(f1, f2): for e1 in edges[f1]: for e2 in edges[f2]: if edges_eq(e1, e2): return e1 return None neighbours = {} for f1 in range(0, len(facets)): neighbours[f1] = [] for f2 in range(0, len(facets)): if f2 == f1: continue if common_edge(f1, f2): neighbours[f1].append(f2) def transform_point(ptr, T): p = pts[ptr] v = Matrix([[p.x], [p.y], [1]]) u = T * v return Point(u[0,0], u[1,0]) def transform_facet(f, T): new_facet = [] for p in facets[f]: new_facet.append(transform_point(p, T)) return new_facet if DEBUG: print "Neighbours:" print neighbours to_process = [] for f in neighbours[0]: to_process.append([f, common_edge(0, f), diag(1, 1, 1)]) processed = {0: diag(1, 1, 1)} while len(to_process) > 0: if DRAW: draw_side_by_side([[pts[p] for p in f] for f in facets], [transform_facet(f, processed[f]) for f in processed]) #draw_side_by_side([], [transform_facet(f, processed[f]) for f in processed]) curr = to_process.pop(0) curr_n = neighbours[curr[0]] curr_t = curr[2] curr_e = curr[1] curr_t = get_shift_transform(-pts[curr_e[0]].x, -pts[curr_e[0]].y) * curr_t curr_t = get_refl_transform( -(pts[curr_e[1]].y - pts[curr_e[0]].y), (pts[curr_e[1]].x - pts[curr_e[0]].x) ) * curr_t curr_t = get_shift_transform(pts[curr_e[0]].x, pts[curr_e[0]].y) * curr_t if DEBUG: print "Curr:" print curr[0] print facets[curr[0]] print "Common edge with parent:" print curr[1] print "Parent matrix:" print curr[2] print "Curr matrix:" print curr_t print "Neighbours:" print curr_n for f in curr_n: if f not in processed: to_process.append([f, common_edge(curr[0], f), curr_t]) processed[curr[0]] = curr_t if DEBUG: print "Processed:" print processed # if DRAW: draw_side_by_side([[pts[p] for p in f] for f in facets], [transform_facet(f, processed[f]) for f in processed]) solution = {} for f in range(0, len(facets)): for p in facets[f]: solution[p] = transform_point(p, processed[f]) print len(pts) for p in pts: print '{},{}'.format(p.x, p.y) print len(facets) for f in facets: facet_str = str(len(f)) facet_str += ' ' + ' '.join(map(str, f)) # for p in f: # facet_str += str(p) + "," # facet_str = facet_str[:-1] print facet_str alpha, beta = get_random_angles() for p in solution: x0, y0 = solution[p].x, solution[p].y x = alpha * x0 + beta * y0 - 1 y = -beta * x0 + alpha * y0 - 2 print "{},{}".format(x, y)
from metric import Metric
import sympy as sp

x = sp.symbols(['z', 't', 'x', 'y'])
gs = [sp.Symbol('g' + str(i))(x[0]) for i in x]
M = Metric(x, sp.diag(*gs))
A = [sp.Symbol(t)(M.x[0]) for t in ['Az', 'At', 'Ax', 'Ay']]
der = M.covariantVectorDerivative(A)
assert (M.g.is_diagonal())
sp.pprint(sp.simplify(sum(der[i][i] * M.ginv[i, i] for i in range(len(M.x)))))
            m20, m21, m22, m23, m30, m31, m32, m33):
    return mat4x4(m00, m01, m02, m03, m10, m11, m12, m13,
                  m20, m21, m22, m23, m30, m31, m32, m33).transpose()


if __name__ == '__main__':
    print('Test of the matrix constructors')
    print('mat4x4(1, 2, ..., 16):\n{0}'.format(mat4x4(*range(1, 17))))
    print('tmat4x4(1, 2, ..., 16):\n{0}'.format(tmat4x4(*range(1, 17))))


# ## Scaling matrix: $\mathrm {Scale}(s_x, s_y, s_z)$

# In[7]:

ScaleSymbols = sp.var('s_x s_y s_z')
Symbolic['Scale'] = sp.diag(s_x, s_y, s_z, 1)
scale = Symbolic['scale'] = Symbolic['M32'](ScaleSymbols, Symbolic['Scale'])

if __name__ == '__main__':
    md('Multiplying a homogeneous coordinate by the scaling matrix $\mathrm {Scale}(s_x, s_y, s_z)$',
       'gives the homogeneous coordinate whose $X$-, $Y$- and $Z$-components are scaled by $s_x, s_y, s_z$, respectively.')
    p = Vec3(x, y, z)
    md('$$\mathrm {Scale}(s_x, s_y, s_z)', Homogeneous(p), '=', Symbolic['Scale'], Homogeneous(p), '=',
       (Symbolic['Scale'] * Homogeneous(p)), '$$')


# ## Rotation transform: Rotate_

# In[8]:
AdSfr=Metric(x, sp.diag(1/f,-f,x[0]**2,x[0]**2)) AdSfr.f=f #used by mcgreevy, hereby shown to be equivalent xn=[sp.Symbol('z',positive=True)]+sp.symbols(['t','x1','x2']) AdSfz=changeOfVars(AdSfr,xn,[1/xn[0]]+xn[1:]) AdSfz.f=sp.Symbol('f')(1/xn[0]) #scwarzchild f w=sp.Wild('w') AdSBHz=Metric(AdSfz.x, AdSfz.g.applyfunc(lambda i:i.replace(sp.Symbol('f')(w),w**2-1/w) ), name='AdSBH') """ zh, L = sp.symbols(["zh", "L"], positive=True) x = [sp.Symbol("z", positive=True)] + sp.symbols(["t", "x1", "x2"]) f = 1 - (x[0] / zh) ** (len(x) - 1) AdSBHz = Metric(x, sp.diag(1 / f, -f, 1, 1) * (L / x[0]) ** 2, "AdSBHz") AdSBHz.zh, AdSBHz.L = (zh, L) zh, L = sp.symbols(["zh", "L"], positive=True) x = [sp.Symbol("z", positive=True)] + sp.symbols(["tau", "x1", "x2"]) f = 1 - (x[0] / zh) ** (len(x) - 1) AdSBHzE = Metric(x, sp.diag(1 / f, f, 1, 1) * (L / x[0]) ** 2, "AdSBHzE") AdSBHzE.zh, AdSBHzE.L = (zh, L) f = sp.Symbol("f")(x[0]) AdSBHf = Metric(x, sp.diag(1 / f, -f, 1, 1) * (L / x[0]) ** 2) AdSBHf.L = L AdSBHf.f = f if __name__ == "__main__": # sp.pprint(AdSfr.R.subs(AdSfr.f,AdSfr.r**2-1/AdSfr.r).doit().ratsimp())
def test_H2(): TP = sympy.diffgeom.TensorProduct R2 = sympy.diffgeom.rn.R2 y = R2.y dy = R2.dy dx = R2.dx g = (TP(dx, dx) + TP(dy, dy))*y**(-2) automat = twoform_to_matrix(g) mat = diag(y**(-2), y**(-2)) assert mat == automat gamma1 = metric_to_Christoffel_1st(g) assert gamma1[0, 0, 0] == 0 assert gamma1[0, 0, 1] == -y**(-3) assert gamma1[0, 1, 0] == -y**(-3) assert gamma1[0, 1, 1] == 0 assert gamma1[1, 1, 1] == -y**(-3) assert gamma1[1, 1, 0] == 0 assert gamma1[1, 0, 1] == 0 assert gamma1[1, 0, 0] == y**(-3) gamma2 = metric_to_Christoffel_2nd(g) assert gamma2[0, 0, 0] == 0 assert gamma2[0, 0, 1] == -y**(-1) assert gamma2[0, 1, 0] == -y**(-1) assert gamma2[0, 1, 1] == 0 assert gamma2[1, 1, 1] == -y**(-1) assert gamma2[1, 1, 0] == 0 assert gamma2[1, 0, 1] == 0 assert gamma2[1, 0, 0] == y**(-1) Rm = metric_to_Riemann_components(g) assert Rm[0, 0, 0, 0] == 0 assert Rm[0, 0, 0, 1] == 0 assert Rm[0, 0, 1, 0] == 0 assert Rm[0, 0, 1, 1] == 0 assert Rm[0, 1, 0, 0] == 0 assert Rm[0, 1, 0, 1] == -y**(-2) assert Rm[0, 1, 1, 0] == y**(-2) assert Rm[0, 1, 1, 1] == 0 assert Rm[1, 0, 0, 0] == 0 assert Rm[1, 0, 0, 1] == y**(-2) assert Rm[1, 0, 1, 0] == -y**(-2) assert Rm[1, 0, 1, 1] == 0 assert Rm[1, 1, 0, 0] == 0 assert Rm[1, 1, 0, 1] == 0 assert Rm[1, 1, 1, 0] == 0 assert Rm[1, 1, 1, 1] == 0 Ric = metric_to_Ricci_components(g) assert Ric[0, 0] == -y**(-2) assert Ric[0, 1] == 0 assert Ric[1, 0] == 0 assert Ric[0, 0] == -y**(-2) assert Ric == ImmutableDenseNDimArray([-y**(-2), 0, 0, -y**(-2)], (2, 2)) ## scalar curvature is -2 #TODO - it would be nice to have index contraction built-in R = (Ric[0, 0] + Ric[1, 1])*y**2 assert R == -2 ## Gauss curvature is -1 assert R/2 == -1
Omicron = Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 0, 0, 1]]) D = Matrix([[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1]]) # Multiplicative component of natural law multiplicative_components = [0, 0, 0, 0, 0] Upsilon = diag(*multiplicative_components) # Additive component of natural law additive_components = [0, 1, 0, 0, 0] S = diag(*additive_components) # Accelerator Alpha = Matrix([[1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0]]) p = [[1, 4], [6, -1], [1, -6]]
def __init__(self, scheme): # TODO: add source terms t, x, y, z = sp.symbols("t x y z", type='real') consm = list(scheme.consm.keys()) nconsm = len(scheme.consm) self.consm = sp.Matrix(consm) self.dim = scheme.dim space = [x, y, z] LA = scheme.symb_la if not LA: LA = scheme.la func = [] for i in range(nconsm): func.append(sp.Function('f{}'.format(i))(t, x, y, z)) #pylint: disable=not-callable func = sp.Matrix(func) sublist = [(i, j) for i, j in zip(consm, func)] sublist_inv = [(j, i) for i, j in zip(consm, func)] eq_func = sp.Matrix(scheme.EQ[nconsm:]).subs(sublist) s = sp.Matrix(scheme.s[nconsm:]) all_vel = scheme.stencil.get_all_velocities() Lambda = [] for i in range(all_vel.shape[1]): vd = LA*sp.diag(*all_vel[:, i]) Lambda.append(scheme.M*vd*scheme.invM) phi1 = sp.zeros(s.shape[0], 1) #pylint: disable=unsubscriptable-object sigma = sp.diag(*s).inv() - sp.eye(len(s))/2 gamma_1 = sp.zeros(nconsm, 1) self.coeff_order1 = [] for dim, lambda_ in enumerate(Lambda): A, B = sp.Matrix([lambda_[:nconsm, :nconsm]]), sp.Matrix([lambda_[:nconsm, nconsm:]]) C, D = sp.Matrix([lambda_[nconsm:, :nconsm]]), sp.Matrix([lambda_[nconsm:, nconsm:]]) self.coeff_order1.append(A*func + B*eq_func) alltogether(self.coeff_order1[-1], nsimplify=True) for i in range(nconsm): gamma_1[i] += sp.Derivative(self.coeff_order1[-1][i], space[dim]) dummy = -C*func - D*eq_func alltogether(dummy, nsimplify=True) for i in range(dummy.shape[0]): phi1[i] += sp.Derivative(dummy[i], space[dim]) self.coeff_order2 = [[sp.zeros(nconsm) for _ in range(scheme.dim)] for _ in range(scheme.dim)] for dim, lambda_ in enumerate(Lambda): A, B = sp.Matrix([lambda_[:nconsm, :nconsm]]), sp.Matrix([lambda_[:nconsm, nconsm:]]) meq = sp.Matrix(scheme.EQ[nconsm:]) jac = meq.jacobian(consm) jac = jac.subs(sublist) delta1 = jac*gamma_1 phi1_ = phi1 + delta1 sphi1 = B*sigma*phi1_ sphi1 = sphi1.doit() alltogether(sphi1, nsimplify=True) for i in range(scheme.dim): for jc in range(nconsm): for ic in range(nconsm): self.coeff_order2[dim][i][ic, jc] += sphi1[ic].expand().coeff(sp.Derivative(func[jc], space[i])) for ic, c in enumerate(self.coeff_order1): self.coeff_order1[ic] = c.subs(sublist_inv) for ic, c in enumerate(self.coeff_order2): for jc, cc in enumerate(c): self.coeff_order2[ic][jc] = cc.subs(sublist_inv)