# print(a / b) # 0.333 # print(a / b * Decimal('3')) # 0.999 with localcontext() as lc: lc.prec = 50 c = a / b print(c * Decimal('3')) # 1.00000000 # math.sum num_list = [3.21e+18, 1, -3.21e+18] print(sum(num_list)) # 0.0 print(math.fsum(num_list)) # 1.0 # sympy print(math.sqrt(8)) # 2.8284271247461903 print(sympy.sqrt(8)) # 2*sqrt(2) print(type(sympy.Rational(1, 3))) # <class 'sympy.core.numbers.Rational'> # sympy symbols x, y, z = sympy.symbols('x y z') y = x + 1 expr = x**2 + 2 * y print(expr) # x**2 + 2*x + 2 print((x + 3 * z).subs({x: 1, z: 2})) # 7 print(sympy.Eq(x + 1, z)) # Eq(x + 1, z) print(sympy.Eq(x**3, x * x * x)) # True # sympy simplify expr1 = (x + 1)**2 expr2 = x**2 + 2 * x + 1 print(sympy.Eq(expr1, expr2)) # Eq((x + 1)**2, x**2 + 2*x + 1) print(sympy.Eq(sympy.simplify(expr1 - expr2), 0)) # True # Fraction
def Convert_Spherical_or_Cartesian_ADM_to_BSSN_curvilinear(
        CoordType_in, Sph_r_th_ph_or_Cart_xyz, gammaDD_inSphorCart,
        KDD_inSphorCart, alpha_inSphorCart, betaU_inSphorCart,
        BU_inSphorCart):
    """Convert ADM variables to rescaled BSSN curvilinear variables.

    Converts the ADM variables
        {gamma_{ij}, K_{ij}, alpha, beta^i}
    given in a Spherical or Cartesian basis+coordinates, first to the BSSN
    variables in the chosen reference_metric::CoordSystem basis:
        {gammabar_{ij}, Abar_{ij}, phi, K, Lambdabar^i, alpha, beta^i, B^i},
    and then to the rescaled variables:
        {h_{ij}, a_{ij}, cf, K, lambda^i, alpha, vet^i, bet^i}.

    :param CoordType_in: "Spherical" or "Cartesian" — basis of the inputs.
    :param Sph_r_th_ph_or_Cart_xyz: list of the three input coordinate symbols
        (r,th,ph or x,y,z) to be substituted with functions of xx0,xx1,xx2.
    :param gammaDD_inSphorCart: input 3-metric (rank-2, lower indices).
    :param KDD_inSphorCart: input extrinsic curvature (rank-2, lower indices).
    :param alpha_inSphorCart: input lapse (scalar).
    :param betaU_inSphorCart: input shift (rank-1, upper index).
    :param BU_inSphorCart: input shift time-derivative variable (rank-1).
    :return: tuple (cf, hDD, lambdaU, aDD, trK, alpha, vetU, betU).

    The ADM & BSSN formalisms only work in 3D; they are 3+1 decompositions
    of Einstein's equations. To implement axisymmetry or spherical symmetry,
    simply set all spatial derivatives in the relevant angular directions to
    zero; DO NOT SET DIM TO ANYTHING BUT 3.
    """
    # Step P1: Set spatial dimension (must be 3 for BSSN)
    DIM = 3

    # Step P2: Copy gammaSphDD_in to gammaSphDD, KSphDD_in to KSphDD, etc.
    #          This ensures that the input arrays are not modified below;
    #          modifying them would result in unexpected effects outside
    #          this function.
    alphaSphorCart = alpha_inSphorCart
    betaSphorCartU = ixp.zerorank1()
    BSphorCartU = ixp.zerorank1()
    gammaSphorCartDD = ixp.zerorank2()
    KSphorCartDD = ixp.zerorank2()
    for i in range(DIM):
        betaSphorCartU[i] = betaU_inSphorCart[i]
        BSphorCartU[i] = BU_inSphorCart[i]
        for j in range(DIM):
            gammaSphorCartDD[i][j] = gammaDD_inSphorCart[i][j]
            KSphorCartDD[i][j] = KDD_inSphorCart[i][j]

    # Make sure that rfm.reference_metric() has been called.
    # We'll need the variables it defines throughout this module.
    if rfm.have_already_called_reference_metric_function == False:
        print(
            "Error. Called Convert_Spherical_ADM_to_BSSN_curvilinear() without"
        )
        print(
            " first setting up reference metric, by calling rfm.reference_metric()."
        )
        exit(1)

    # Step 1: All input quantities are in terms of r,th,ph or x,y,z. We want them in terms
    #         of xx0,xx1,xx2, so here we call sympify_integers__replace_rthph() to replace
    #         r,th,ph or x,y,z, respectively, with the appropriate functions of xx0,xx1,xx2
    #         as defined for this particular reference metric in reference_metric.py's
    #         xxSph[] or xxCart[], respectively:

    # Note that substitution only works when the variable is not an integer. Hence the
    # if isinstance(...,...) stuff:
    def sympify_integers__replace_rthph_or_Cartxyz(obj, rthph_or_xyz,
                                                   rthph_or_xyz_of_xx):
        # Integers have no .subs(); promote them to sympy objects instead.
        if isinstance(obj, int):
            return sp.sympify(obj)
        else:
            return obj.subs(rthph_or_xyz[0], rthph_or_xyz_of_xx[0]).\
                subs(rthph_or_xyz[1], rthph_or_xyz_of_xx[1]).\
                subs(rthph_or_xyz[2], rthph_or_xyz_of_xx[2])

    r_th_ph_or_Cart_xyz_of_xx = []
    if CoordType_in == "Spherical":
        r_th_ph_or_Cart_xyz_of_xx = rfm.xxSph
    elif CoordType_in == "Cartesian":
        r_th_ph_or_Cart_xyz_of_xx = rfm.xxCart
    else:
        print(
            "Error: Can only convert ADM Cartesian or Spherical initial data to BSSN Curvilinear coords."
        )
        exit(1)

    alphaSphorCart = sympify_integers__replace_rthph_or_Cartxyz(
        alphaSphorCart, Sph_r_th_ph_or_Cart_xyz, r_th_ph_or_Cart_xyz_of_xx)
    for i in range(DIM):
        betaSphorCartU[i] = sympify_integers__replace_rthph_or_Cartxyz(
            betaSphorCartU[i], Sph_r_th_ph_or_Cart_xyz,
            r_th_ph_or_Cart_xyz_of_xx)
        BSphorCartU[i] = sympify_integers__replace_rthph_or_Cartxyz(
            BSphorCartU[i], Sph_r_th_ph_or_Cart_xyz, r_th_ph_or_Cart_xyz_of_xx)
        for j in range(DIM):
            gammaSphorCartDD[i][
                j] = sympify_integers__replace_rthph_or_Cartxyz(
                    gammaSphorCartDD[i][j], Sph_r_th_ph_or_Cart_xyz,
                    r_th_ph_or_Cart_xyz_of_xx)
            KSphorCartDD[i][j] = sympify_integers__replace_rthph_or_Cartxyz(
                KSphorCartDD[i][j], Sph_r_th_ph_or_Cart_xyz,
                r_th_ph_or_Cart_xyz_of_xx)

    # Step 2: All ADM initial data quantities are now functions of xx0,xx1,xx2, but
    #         they are still in the Spherical or Cartesian basis. We can now directly apply
    #         Jacobian transformations to get them in the correct xx0,xx1,xx2 basis:

    # alpha is a scalar, so no Jacobian transformation is necessary.
    alpha = alphaSphorCart

    # Jacobian d(r,th,ph or x,y,z)/d(xx0,xx1,xx2), and its inverse for
    # transforming upper-index (contravariant) quantities.
    Jac_dUSphorCart_dDrfmUD = ixp.zerorank2()
    for i in range(DIM):
        for j in range(DIM):
            Jac_dUSphorCart_dDrfmUD[i][j] = sp.diff(
                r_th_ph_or_Cart_xyz_of_xx[i], rfm.xx[j])

    Jac_dUrfm_dDSphorCartUD, dummyDET = ixp.generic_matrix_inverter3x3(
        Jac_dUSphorCart_dDrfmUD)

    betaU = ixp.zerorank1()
    BU = ixp.zerorank1()
    gammaDD = ixp.zerorank2()
    KDD = ixp.zerorank2()
    for i in range(DIM):
        for j in range(DIM):
            # Vectors transform with the inverse Jacobian...
            betaU[i] += Jac_dUrfm_dDSphorCartUD[i][j] * betaSphorCartU[j]
            BU[i] += Jac_dUrfm_dDSphorCartUD[i][j] * BSphorCartU[j]
            for k in range(DIM):
                for l in range(DIM):
                    # ...while rank-2 lower-index tensors transform with two
                    # factors of the forward Jacobian.
                    gammaDD[i][j] += Jac_dUSphorCart_dDrfmUD[k][
                        i] * Jac_dUSphorCart_dDrfmUD[l][j] * gammaSphorCartDD[
                            k][l]
                    KDD[i][j] += Jac_dUSphorCart_dDrfmUD[k][
                        i] * Jac_dUSphorCart_dDrfmUD[l][j] * KSphorCartDD[k][l]

    # Step 3: All ADM quantities were input into this function in the Spherical or Cartesian
    #         basis, as functions of r,th,ph or x,y,z, respectively. In Steps 1 and 2 above,
    #         we converted them to the xx0,xx1,xx2 basis, and as functions of xx0,xx1,xx2.
    #         Here we convert ADM quantities to their BSSN Curvilinear counterparts:

    # Step 3.1: Convert ADM $\gamma_{ij}$ to BSSN $\bar{gamma}_{ij}$:
    #           We have (Eqs. 2 and 3 of [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):
    #           \bar{gamma}_{ij} = (\frac{\bar{gamma}}{gamma})^{1/3}*gamma_{ij}.
    gammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)
    gammabarDD = ixp.zerorank2()
    for i in range(DIM):
        for j in range(DIM):
            gammabarDD[i][j] = (rfm.detgammahat / gammaDET)**(sp.Rational(
                1, 3)) * gammaDD[i][j]

    # Step 3.2: Convert the extrinsic curvature K_{ij} to the trace-free extrinsic
    #           curvature \bar{A}_{ij}, plus the trace of the extrinsic curvature K,
    #           where (Eq. 3 of [Baumgarte *et al.*](https://arxiv.org/pdf/1211.6632.pdf)):
    #           K = gamma^{ij} K_{ij}, and
    #           \bar{A}_{ij} &= (\frac{\bar{gamma}}{gamma})^{1/3}*(K_{ij} - \frac{1}{3}*gamma_{ij}*K)
    trK = sp.sympify(0)
    for i in range(DIM):
        for j in range(DIM):
            trK += gammaUU[i][j] * KDD[i][j]

    AbarDD = ixp.zerorank2()
    for i in range(DIM):
        for j in range(DIM):
            AbarDD[i][j] = (rfm.detgammahat / gammaDET)**(sp.Rational(
                1, 3)) * (KDD[i][j] - sp.Rational(1, 3) * gammaDD[i][j] * trK)

    # Step 3.3: Define \bar{Lambda}^i (Eqs. 4 and 5 of [Baumgarte *et al.*](https://arxiv.org/pdf/1211.6632.pdf)):
    #           \bar{Lambda}^i = \bar{gamma}^{jk}(\bar{Gamma}^i_{jk} - \hat{Gamma}^i_{jk}).
    gammabarUU, gammabarDET = ixp.symm_matrix_inverter3x3(gammabarDD)

    # First compute \bar{Gamma}^i_{jk}:
    GammabarUDD = ixp.zerorank3()
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                for l in range(DIM):
                    GammabarUDD[i][j][k] += sp.Rational(
                        1, 2) * gammabarUU[i][l] * (
                            sp.diff(gammabarDD[l][j], rfm.xx[k]) +
                            sp.diff(gammabarDD[l][k], rfm.xx[j]) -
                            sp.diff(gammabarDD[j][k], rfm.xx[l]))

    # Next evaluate \bar{Lambda}^i, based on GammabarUDD above and GammahatUDD
    # (from the reference metric):
    LambdabarU = ixp.zerorank1()
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                LambdabarU[i] += gammabarUU[j][k] * (GammabarUDD[i][j][k] -
                                                     rfm.GammahatUDD[i][j][k])

    # Step 3.4: Set the conformal factor variable cf, which is set
    #           by the "BSSN_quantities::EvolvedConformalFactor_cf" parameter. For example if
    #           "EvolvedConformalFactor_cf" is set to "phi", we can use Eq. 3 of
    #           [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf),
    #           which in arbitrary coordinates is written:
    #           phi = \frac{1}{12} log(\frac{gamma}{\bar{gamma}}).
    #           Alternatively if "BSSN_quantities::EvolvedConformalFactor_cf" is set to "chi", then
    #           chi = exp(-4*phi) = (\frac{gamma}{\bar{gamma}})^{-1/3}.
    #           Finally if "BSSN_quantities::EvolvedConformalFactor_cf" is set to "W", then
    #           W = exp(-2*phi) = (\frac{gamma}{\bar{gamma}})^{-1/6}.
    cf = sp.sympify(0)
    if par.parval_from_str("EvolvedConformalFactor_cf") == "phi":
        cf = sp.Rational(1, 12) * sp.log(gammaDET / gammabarDET)
    elif par.parval_from_str("EvolvedConformalFactor_cf") == "chi":
        cf = (gammaDET / gammabarDET)**(-sp.Rational(1, 3))
    elif par.parval_from_str("EvolvedConformalFactor_cf") == "W":
        cf = (gammaDET / gammabarDET)**(-sp.Rational(1, 6))
    else:
        print("Error EvolvedConformalFactor_cf type = \"" +
              par.parval_from_str("EvolvedConformalFactor_cf") +
              "\" unknown.")
        exit(1)

    # Step 4: Rescale tensorial quantities according to the prescription described in
    #         the [BSSN in curvilinear coordinates tutorial module](Tutorial-BSSNCurvilinear.ipynb)
    #         (also [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):
    #
    #         h_{ij} = (\bar{gamma}_{ij} - \hat{gamma}_{ij})/(ReDD[i][j])
    #         a_{ij} = \bar{A}_{ij}/(ReDD[i][j])
    #         \lambda^i = \bar{Lambda}^i/(ReU[i])
    #         \mathcal{V}^i &= beta^i/(ReU[i])
    #         \mathcal{B}^i &= B^i/(ReU[i])
    hDD = ixp.zerorank2()
    aDD = ixp.zerorank2()
    lambdaU = ixp.zerorank1()
    vetU = ixp.zerorank1()
    betU = ixp.zerorank1()
    for i in range(DIM):
        lambdaU[i] = LambdabarU[i] / rfm.ReU[i]
        vetU[i] = betaU[i] / rfm.ReU[i]
        betU[i] = BU[i] / rfm.ReU[i]
        for j in range(DIM):
            hDD[i][j] = (gammabarDD[i][j] - rfm.ghatDD[i][j]) / rfm.ReDD[i][j]
            aDD[i][j] = AbarDD[i][j] / rfm.ReDD[i][j]
    #print(sp.mathematica_code(hDD[0][0]))

    # Step 5: Return the BSSN Curvilinear variables in the desired xx0,xx1,xx2
    #         basis, and as functions of the consistent xx0,xx1,xx2 coordinates.
    return cf, hDD, lambdaU, aDD, trK, alpha, vetU, betU
def GiRaFFEfood_NRPy_1D_tests_three_waves(stagger=False):
    """Set up the GiRaFFE 1D "three waves" flat-space test initial data.

    Builds symbolic expressions for the vector potential AD, the magnetic
    field BU and the Valencia 3-velocity ValenciavU, publishing all three
    as module-level globals (AD, BU, ValenciavU).

    :param stagger: if True, evaluate the vector potential at points shifted
        by half a grid spacing (x + dx/2, y + dy/2) for staggered grids.
    :return: None — results are exposed via the globals listed above.
    """
    # We'll use reference_metric.py to define x and y
    x = rfm.xxCart[0]
    y = rfm.xxCart[1]
    if stagger:
        # Half-grid-spacing offsets, used only for the staggered potential.
        x_p_half = x + sp.Rational(1, 2) * gri.dxx[0]
        y_p_half = y + sp.Rational(1, 2) * gri.dxx[1]

    # Now, we can define the vector potential. We will create three copies of
    # this variable, because the potential is uniquely defined in three zones.
    # Data for $x \leq -0.1/\gamma_\mu$ shall be referred to as "left", data for
    # $-0.1/\gamma_\mu \leq x \leq 0.1/\gamma_\mu$ as "center", and data for
    # $x \geq 0.1/\gamma_\mu$ as "right".
    global AD
    AD = ixp.zerorank1(DIM=3)

    import Min_Max_and_Piecewise_Expressions as noif

    AD[0] = sp.sympify(0)
    if stagger:
        AD[1] = sp.Rational(7, 2) * x_p_half * noif.coord_greater_bound(
            -x_p_half, 0) + sp.sympify(
                3) * x_p_half * noif.coord_greater_bound(x_p_half, 0)
        AD[2] = y_p_half - sp.Rational(
            3, 2) * x_p_half * noif.coord_greater_bound(
                -x_p_half, 0) - sp.sympify(
                    3) * x_p_half * noif.coord_greater_bound(x_p_half, 0)
    else:
        AD[1] = sp.Rational(7, 2) * x * noif.coord_greater_bound(
            -x, 0) + sp.sympify(3) * x * noif.coord_greater_bound(x, 0)
        AD[2] = y - sp.Rational(3, 2) * x * noif.coord_greater_bound(
            -x, 0) - sp.sympify(3) * x * noif.coord_greater_bound(x, 0)

    # ### Set the vectors $B^i$ and $E^i$ for the velocity
    #
    # Now, we will set the magnetic and electric fields that we will need to
    # define the initial velocities. First, we need to define
    # $$f(x)=1+\sin (5\pi x);$$ note that in the definition of $B^i$, we need
    # $f(x')$ where $x'=\gamma_\mu x$.
    # We will now set the magnetic field in the wave frame:
    # \begin{align}
    # B'^{x'}(x') = &\ 1.0,\ B'^y(x') = 1.0, \\
    # B'^z(x') = &\ \left \{ \begin{array}{lll} 1.0 & \mbox{if} & x' \leq -0.1 \\
    # 1.0+0.15 f(x') & \mbox{if} & -0.1 \leq x' \leq 0.1 \\
    # 1.3 & \mbox{if} & x' \geq 0.1 \end{array} \right. .
    # \end{align}
    # The fields are decomposed into an ambient part (_a) plus right-going (_p)
    # and left-going (_m) wave parts.
    B_aU = ixp.zerorank1(DIM=3)
    E_aU = ixp.zerorank1(DIM=3)
    B_pU = ixp.zerorank1(DIM=3)
    E_pU = ixp.zerorank1(DIM=3)
    B_mU = ixp.zerorank1(DIM=3)
    E_mU = ixp.zerorank1(DIM=3)

    # NOTE(review): the staggered version of the E&B fields below is kept
    # commented out — as written, the same cell-centered fields are used for
    # both stagger settings; confirm this is intentional.
    # if stagger:
    #     B_aU[0] = sp.sympify(1)
    #     B_aU[1] = noif.coord_leq_bound(x_p_half,0) * sp.sympify(1) + noif.coord_greater_bound(x_p_half,0) * sp.Rational(3,2)
    #     B_aU[2] = sp.sympify(2)
    #     E_aU[0] = noif.coord_leq_bound(x_p_half,0) * sp.sympify(-1) + noif.coord_greater_bound(x_p_half,0) * sp.Rational(-3,2)
    #     E_aU[1] = sp.sympify(1)
    #     E_aU[2] = sp.sympify(0)
    #     B_pU[0] = sp.sympify(0)
    #     B_pU[1] = noif.coord_leq_bound(x_p_half,0) * sp.sympify(0) + noif.coord_greater_bound(x_p_half,0) * sp.Rational(3,2)
    #     B_pU[2] = noif.coord_leq_bound(x_p_half,0) * sp.sympify(0) + noif.coord_greater_bound(x_p_half,0) * sp.sympify(1)
    #     E_pU[0] = sp.sympify(0)
    #     E_pU[1] = noif.coord_leq_bound(x_p_half,0) * sp.sympify(0) + noif.coord_greater_bound(x_p_half,0) * sp.sympify(1)
    #     E_pU[2] = noif.coord_leq_bound(x_p_half,0) * sp.sympify(0) + noif.coord_greater_bound(x_p_half,0) * sp.Rational(-3,2)
    #     B_mU[0] = sp.sympify(0)
    #     B_mU[1] = noif.coord_leq_bound(x_p_half,0) * sp.Rational(1,2) + noif.coord_greater_bound(x_p_half,0) * sp.sympify(0)
    #     B_mU[2] = noif.coord_leq_bound(x_p_half,0) * sp.Rational(3,2) + noif.coord_greater_bound(x_p_half,0) * sp.sympify(0)
    #     E_mU[0] = sp.sympify(0)
    #     E_mU[1] = noif.coord_leq_bound(x_p_half,0) * sp.Rational(-3,2) + noif.coord_greater_bound(x_p_half,0) * sp.sympify(0)
    #     E_mU[2] = noif.coord_leq_bound(x_p_half,0) * sp.Rational(1,2) + noif.coord_greater_bound(x_p_half,0) * sp.sympify(0)
    # else:
    B_aU[0] = sp.sympify(1)
    B_aU[1] = noif.coord_leq_bound(x, 0) * sp.sympify(
        1) + noif.coord_greater_bound(x, 0) * sp.Rational(3, 2)
    B_aU[2] = sp.sympify(2)
    E_aU[0] = noif.coord_leq_bound(x, 0) * sp.sympify(
        -1) + noif.coord_greater_bound(x, 0) * sp.Rational(-3, 2)
    E_aU[1] = sp.sympify(1)
    E_aU[2] = sp.sympify(0)
    B_pU[0] = sp.sympify(0)
    B_pU[1] = noif.coord_leq_bound(x, 0) * sp.sympify(
        0) + noif.coord_greater_bound(x, 0) * sp.Rational(3, 2)
    B_pU[2] = noif.coord_leq_bound(
        x, 0) * sp.sympify(0) + noif.coord_greater_bound(x, 0) * sp.sympify(1)
    E_pU[0] = sp.sympify(0)
    E_pU[1] = noif.coord_leq_bound(
        x, 0) * sp.sympify(0) + noif.coord_greater_bound(x, 0) * sp.sympify(1)
    E_pU[2] = noif.coord_leq_bound(x, 0) * sp.sympify(
        0) + noif.coord_greater_bound(x, 0) * sp.Rational(-3, 2)
    B_mU[0] = sp.sympify(0)
    B_mU[1] = noif.coord_leq_bound(x, 0) * sp.Rational(
        1, 2) + noif.coord_greater_bound(x, 0) * sp.sympify(0)
    B_mU[2] = noif.coord_leq_bound(x, 0) * sp.Rational(
        3, 2) + noif.coord_greater_bound(x, 0) * sp.sympify(0)
    E_mU[0] = sp.sympify(0)
    E_mU[1] = noif.coord_leq_bound(x, 0) * sp.Rational(
        -3, 2) + noif.coord_greater_bound(x, 0) * sp.sympify(0)
    E_mU[2] = noif.coord_leq_bound(x, 0) * sp.Rational(
        1, 2) + noif.coord_greater_bound(x, 0) * sp.sympify(0)

    # Total fields = ambient + right-going + left-going wave contributions.
    global BU
    BU = ixp.zerorank1(DIM=3)
    EU = ixp.zerorank1(DIM=3)
    for i in range(3):
        BU[i] = B_aU[i] + B_pU[i] + B_mU[i]
        EU[i] = E_aU[i] + E_pU[i] + E_mU[i]

    # <a id='step3'></a>
    # ### Calculate $v^i$
    #
    # Now, we calculate $$\mathbf{v} = \frac{\mathbf{E} \times \mathbf{B}}{B^2},$$
    # which is equivalent to $$v^i = [ijk] \frac{E^j B^k}{B^2},$$ where $[ijk]$
    # is the Levi-Civita symbol and $B^2 = \gamma_{ij} B^i B^j$ is a trivial
    # dot product in flat space.
    LeviCivitaSymbolDDD = ixp.LeviCivitaSymbol_dim3_rank3()

    B2 = sp.sympify(0)
    for i in range(3):
        # In flat spacetime, gamma_{ij} is just a Kronecker delta
        B2 += BU[i]**2  # This is trivial to extend to curved spacetime

    global ValenciavU
    ValenciavU = ixp.zerorank1()
    for i in range(3):
        for j in range(3):
            for k in range(3):
                ValenciavU[
                    i] += LeviCivitaSymbolDDD[i][j][k] * EU[j] * BU[k] / B2
# Author: Dominik Gresch <*****@*****.**> """Tests for utilities handling algebraic expressions.""" import pytest import sympy as sp from kdotp_symmetry._expr_utils import expr_to_vector, monomial_basis, matrix_to_expr_operator kx, ky, kz = sp.symbols('kx, ky, kz') @pytest.mark.parametrize('expr,vector,basis', [ (1 + kx - ky + 2 * kz, (1, 1, -1, 2), [sp.Integer(1), kx, ky, kz]), (kx * ky + kx * ky * kz, (0, 0, 0, 0, 1, 0, 0, 1), [sp.Integer(1), kx, ky, kz, kx * ky, kx * kz, ky * kz, kx * ky * kz]), (1 + sp.Rational(1, 4) * ky, (1, 0, 0.25, 0), [sp.Integer(1), kx, ky, kz]) ]) def test_expr_to_vector(expr, vector, basis): """ Test that an expression is correctly converted into a vector, with a given basis. """ assert expr_to_vector(expr, basis=basis) == vector @pytest.mark.parametrize('expr,basis', [ (1 + kx, [sp.Integer(1), kx, kx, kz]), ]) def test_basis_not_independent(expr, basis): """ Test that an error is raised when the basis is not linearly independent. """
LexerMatcher( lambda code, last: None if last and ('expression' in last.type or 'bracket' in last.type and ')' == last. content) else re.match(r'/((\s*([^)/]|\\.)([^/\\]|\\.)*)?)/', code), lambda code, match: (match.end(), Token('literal:expression', re.compile(match.group(1)))), getlast=True), RegexMatcher(r'\d+b[01]+', 0, 'literal:expression', intify(2)), RegexMatcher(r'\d+o[0-7]+', 0, 'literal:expression', intify(8)), RegexMatcher(r'\d+x[0-9a-fA-F]+', 0, 'literal:expression', intify(16)), RegexMatcher( r'\d+e\d+', 0, 'literal:expression', lambda x: (lambda y: sympy.Integer(y[0]) * 10**sympy.Integer(y[1])) (x.split('e'))), RegexMatcher(r'\d*\.\d+j', 0, 'literal:expression', lambda x: sympy.I * sympy.Rational(x[:-1])), RegexMatcher(r'\d+j', 0, 'literal:expression', lambda x: sympy.I * int(x[:-1])), RegexMatcher(r'\d*\.\d+', 0, 'literal:expression', sympy.Rational), RegexMatcher(r'\d+', 0, 'literal:expression', sympy.Integer), RegexMatcher(r'"([^"\\]|\\.)*"', 0, 'literal:expression', lambda x: x[1:-1]), RegexMatcher(r"'([^'\\]|\\.)*'", 0, 'literal:expression', lambda x: x[1:-1]), ErrorMatcher(RegexMatcher(r'"([^"\\]|\\.)*', 0, ''), UnclosedStringError), ErrorMatcher(RegexMatcher(r"'([^'\\]|\\.)*", 0, ''), UnclosedStringError), RegexMatcher( '(%s)' % '|'.join(['(%s)[^A-Za-z_]' % keyword for keyword in keywords]), 1, 'keyword', lambda x: x[:-1], -1), LexerMatcher(
def mrt_orthogonal_modes_literature(stencil, is_weighted):
    """
    Returns a list of lists of modes, grouped by common relaxation times.
    This is for commonly used MRT models found in literature.

    Args:
        stencil: instance of :class:`lbmpy.stencils.LBStencil`. Can be D2Q9, D3Q15, D3Q19 or D3Q27
        is_weighted: whether to use weighted or unweighted orthogonality

    MRT schemes as described in the following references are used

    Raises:
        NotImplementedError: for any stencil/weighting combination not
        covered below (note D3Q27 is only available *unweighted*).
    """
    x, y, z = MOMENT_SYMBOLS
    one = sp.Rational(1, 1)

    if have_same_entries(stencil, LBStencil(Stencil.D2Q9)) and is_weighted:
        # Reference:
        # Duenweg, B., Schiller, U. D., & Ladd, A. J. (2007). Statistical mechanics of the fluctuating
        # lattice Boltzmann equation. Physical Review E, 76(3)
        sq = x ** 2 + y ** 2
        all_moments = [one, x, y, 3 * sq - 2, 2 * x ** 2 - sq, x * y,
                       (3 * sq - 4) * x, (3 * sq - 4) * y, 9 * sq ** 2 - 15 * sq + 2]
        nested_moments = list(sort_moments_into_groups_of_same_order(all_moments).values())
        return nested_moments
    elif have_same_entries(stencil, LBStencil(Stencil.D3Q15)) and is_weighted:
        sq = x ** 2 + y ** 2 + z ** 2
        # Moments are already grouped by relaxation time; the bracketed
        # indices refer to the ordering in the original publication.
        nested_moments = [
            [one, x, y, z],  # [0, 3, 5, 7]
            [sq - 1],  # [1]
            [3 * sq ** 2 - 9 * sq + 4],  # [2]
            [(3 * sq - 5) * x, (3 * sq - 5) * y, (3 * sq - 5) * z],  # [4, 6, 8]
            [3 * x ** 2 - sq, y ** 2 - z ** 2, x * y, y * z, x * z],  # [9, 10, 11, 12, 13]
            [x * y * z]
        ]
    elif have_same_entries(stencil, LBStencil(Stencil.D3Q19)) and is_weighted:
        # This MRT variant mentioned in the dissertation of Ulf Schiller
        # "Thermal fluctuations and boundary conditions in the lattice Boltzmann method" (2008), p. 24ff
        # There are some typos in the moment matrix on p.27
        # The here implemented ordering of the moments is however different from that reference (Eq. 2.61-2.63)
        # The moments are weighted-orthogonal (Eq. 2.58)

        # Further references:
        # Duenweg, B., Schiller, U. D., & Ladd, A. J. (2007). Statistical mechanics of the fluctuating
        # lattice Boltzmann equation. Physical Review E, 76(3)
        # Chun, B., & Ladd, A. J. (2007). Interpolated boundary condition for lattice Boltzmann simulations of
        # flows in narrow gaps. Physical review E, 75(6)
        sq = x ** 2 + y ** 2 + z ** 2
        nested_moments = [
            [one, x, y, z],  # [0, 3, 5, 7]
            [sq - 1],  # [1]
            [3 * sq ** 2 - 6 * sq + 1],  # [2]
            [(3 * sq - 5) * x, (3 * sq - 5) * y, (3 * sq - 5) * z],  # [4, 6, 8]
            [3 * x ** 2 - sq, y ** 2 - z ** 2, x * y, y * z, x * z],  # [9, 11, 13, 14, 15]
            [(2 * sq - 3) * (3 * x ** 2 - sq), (2 * sq - 3) * (y ** 2 - z ** 2)],  # [10, 12]
            [(y ** 2 - z ** 2) * x, (z ** 2 - x ** 2) * y, (x ** 2 - y ** 2) * z]  # [16, 17, 18]
        ]
    elif have_same_entries(stencil, LBStencil(Stencil.D3Q27)) and not is_weighted:
        xsq, ysq, zsq = x ** 2, y ** 2, z ** 2
        all_moments = [
            sp.Rational(1, 1),  # 0
            x, y, z,  # 1, 2, 3
            x * y, x * z, y * z,  # 4, 5, 6
            xsq - ysq,  # 7
            (xsq + ysq + zsq) - 3 * zsq,  # 8
            (xsq + ysq + zsq) - 2,  # 9
            3 * (x * ysq + x * zsq) - 4 * x,  # 10
            3 * (xsq * y + y * zsq) - 4 * y,  # 11
            3 * (xsq * z + ysq * z) - 4 * z,  # 12
            x * ysq - x * zsq,  # 13
            xsq * y - y * zsq,  # 14
            xsq * z - ysq * z,  # 15
            x * y * z,  # 16
            3 * (xsq * ysq + xsq * zsq + ysq * zsq) - 4 * (xsq + ysq + zsq) + 4,  # 17
            3 * (xsq * ysq + xsq * zsq - 2 * ysq * zsq) - 2 * (2 * xsq - ysq - zsq),  # 18
            3 * (xsq * ysq - xsq * zsq) - 2 * (ysq - zsq),  # 19
            3 * (xsq * y * z) - 2 * (y * z),  # 20
            3 * (x * ysq * z) - 2 * (x * z),  # 21
            3 * (x * y * zsq) - 2 * (x * y),  # 22
            9 * (x * ysq * zsq) - 6 * (x * ysq + x * zsq) + 4 * x,  # 23
            9 * (xsq * y * zsq) - 6 * (xsq * y + y * zsq) + 4 * y,  # 24
            9 * (xsq * ysq * z) - 6 * (xsq * z + ysq * z) + 4 * z,  # 25
            27 * (xsq * ysq * zsq) - 18 * (xsq * ysq + xsq * zsq + ysq * zsq) + 12 * (xsq + ysq + zsq) - 8,  # 26
        ]
        nested_moments = list(sort_moments_into_groups_of_same_order(all_moments).values())
    else:
        raise NotImplementedError("No MRT model is available (yet) for this stencil. "
                                  "Create a custom MRT using 'create_with_discrete_maxwellian_eq_moments'")

    return nested_moments
def calc_pe(self, p):
    """Return the potential energy (1/2) * k * z**2 as an exact symbolic value.

    The parameter ``p`` is accepted for interface compatibility but unused here.
    """
    # Rational(1, 2) keeps the 1/2 prefactor exact in the symbolic result.
    one_half = sym.Rational(1, 2)
    return one_half * self.__k * self.__z ** 2
def run(dx, Tf, generator="cython", sorder=None, with_plot=True):
    """
    Run a D2Q9/D2Q5 coupled lattice-Boltzmann thermal-convection simulation.

    Parameters
    ----------
    dx: double
        spatial step
    Tf: double
        final time
    generator: pylbm generator
    sorder: list
        storage order
    with_plot: boolean
        if True plot the solution otherwise just compute the solution

    Returns
    -------
    The pylbm.Simulation object after running to Tf (only on the
    non-plotting path; the animation path returns None when the
    viewer window closes).
    """
    # parameters
    T0 = .5          # reference temperature
    Tin = -.5        # inlet temperature
    xmin, xmax, ymin, ymax = 0., 1., 0., 1.
    Ra = 2000        # Rayleigh number
    Pr = 0.71        # Prandtl number
    Ma = 0.01        # Mach number (unused below; kept for reference)
    alpha = .005     # thermal expansion coefficient
    la = 1.  # velocity of the scheme
    rhoo = 1.
    g = 9.81
    uo = 0.025

    # Viscosity/diffusivity chosen so the dimensionless numbers above hold.
    nu = np.sqrt(Pr * alpha * 9.81 * (T0 - Tin) * (ymax - ymin) / Ra)
    kappa = nu / Pr
    eta = nu
    #print nu, kappa
    # Relaxation parameters for the 9-velocity (flow) scheme.
    snu = 1. / (.5 + 3 * nu)
    seta = 1. / (.5 + 3 * eta)
    sq = 8 * (2 - snu) / (8 - snu)
    se = seta
    sf = [0., 0., 0., seta, se, sq, sq, snu, snu]
    #print sf
    # Relaxation parameters for the 5-velocity (temperature) scheme.
    a = .5
    skappa = 1. / (.5 + 10 * kappa / (4 + a))
    #skappa = 1./(.5+np.sqrt(3)/6)
    se = 1. / (.5 + np.sqrt(3) / 3)
    snu = se
    sT = [0., skappa, skappa, se, snu]
    #print sT

    # Full pylbm problem description: geometry, two coupled schemes
    # (hydrodynamics + temperature), initial and boundary conditions.
    dico = {
        'box': {
            'x': [xmin, xmax],
            'y': [ymin, ymax],
            'label': [1, 2, 0, 0]
        },
        'elements': [
            # Two vertical obstacles attached to the left and right walls.
            pylbm.Parallelogram([xmin, ymin], [.1, 0], [0, .8], label=0),
            pylbm.Parallelogram([xmax, ymin], [-.1, 0], [0, .8], label=0),
        ],
        'space_step': dx,
        'scheme_velocity': la,
        'schemes': [
            {
                'velocities': list(range(9)),
                'conserved_moments': [rho, qx, qy],
                'polynomials': [
                    1, X, Y, 3 * (X**2 + Y**2) - 4,
                    sp.Rational(1, 2) *
                    (9 * (X**2 + Y**2)**2 - 21 * (X**2 + Y**2) + 8),
                    3 * X * (X**2 + Y**2) - 5 * X,
                    3 * Y * (X**2 + Y**2) - 5 * Y, X**2 - Y**2, X * Y
                ],
                'relaxation_parameters': sf,
                'equilibrium': [
                    rho, qx, qy, -2 * rho + 3 * (qx**2 + qy**2),
                    rho - 3 * (qx**2 + qy**2), -qx, -qy, qx**2 - qy**2,
                    qx * qy
                ],
                # Boussinesq buoyancy coupling to the temperature field.
                'source_terms': {
                    qy: alpha * g * T
                },
            },
            {
                'velocities': list(range(5)),
                'conserved_moments': T,
                'polynomials':
                [1, X, Y, 5 * (X**2 + Y**2) - 4, (X**2 - Y**2)],
                'equilibrium': [T, T * qx, T * qy, a * T, 0.],
                'relaxation_parameters': sT,
            },
        ],
        'init': {
            rho: 1.,
            qx: 0.,
            qy: 0.,
            T: (init_T, (T0, ))
        },
        'boundary_conditions': {
            0: {
                'method': {
                    0: pylbm.bc.BouzidiBounceBack,
                    1: pylbm.bc.BouzidiAntiBounceBack
                },
                'value': (bc, (T0, ))
            },
            1: {
                'method': {
                    0: pylbm.bc.BouzidiBounceBack,
                    1: pylbm.bc.BouzidiAntiBounceBack
                },
                'value': (bc_in, (T0, Tin, ymax, rhoo, uo))
            },
            2: {
                'method': {
                    0: pylbm.bc.NeumannX,
                    1: pylbm.bc.NeumannX
                },
            },
        },
        'generator': generator,
    }

    sol = pylbm.Simulation(dico)

    if with_plot:
        # create the viewer to plot the solution
        viewer = pylbm.viewer.matplotlib_viewer
        fig = viewer.Fig()
        ax = fig[0]
        im = ax.image(sol.m[T].transpose(), cmap='jet', clim=[Tin, T0])
        ax.title = 'solution at t = {0:f}'.format(sol.t)
        # Draw the two obstacles (coordinates in lattice units, hence /dx).
        ax.polygon([[xmin / dx, ymin / dx], [xmin / dx, (ymin + .8) / dx],
                    [(xmin + .1) / dx, (ymin + .8) / dx],
                    [(xmin + .1) / dx, ymin / dx]], 'k')
        ax.polygon([[(xmax - .1) / dx, ymin / dx],
                    [(xmax - .1) / dx, (ymin + .8) / dx],
                    [xmax / dx, (ymin + .8) / dx], [xmax / dx, ymin / dx]],
                   'k')

        def update(iframe):
            # Advance nrep lattice steps per displayed frame.
            nrep = 64
            for i in range(nrep):
                sol.one_time_step()
            im.set_data(sol.m[T].transpose())
            ax.title = 'temperature at t = {0:f}'.format(sol.t)

        fig.animate(update, interval=1)
        fig.show()
    else:
        while sol.t < Tf:
            sol.one_time_step()

    return sol
# Plot-generation configuration flags.
SAVE_PLOTS = True
SAVE_PREFIX = "plots/"
DO_EXAMPLE = False
PLOTS = ["autarky", "relative", "trade"]
CLOSEUP_RELATIVE = True
SHOW_GRAPHS = False

if DO_EXAMPLE:
    save_dir = SAVE_PREFIX + "example/"
    if SAVE_PLOTS:
        os.makedirs(save_dir, exist_ok=True)
    # Example two-good economy with Cobb-Douglas utility sqrt(q_pv * q_wine).
    example = Economy(
        'Honolulu',  # name of the trading agent
        'pv', 'wine',  # name of goods (x, y)
        q_pv**sp.Rational(1, 2) * q_wine**(sp.Rational(1, 2)),  # utility
        make_ellipsoid_ppf(170, 170),  # ppf
        [0.2, 0.2, 0.2]  # color for plotting
    )
    autarky_plot = example.plot_autarky((0, 200))
    if SHOW_GRAPHS:
        autarky_plot.show()
    # NOTE(review): the plot3d ranges use (qx, ...) and (dy, ...) — `dy`
    # looks like it should be `qy`; confirm against the symbol definitions
    # earlier in the file.
    utility_plot = sp.plotting.plot3d(example.utility, (qx, 0, 200),
                                      (dy, 0, 200), show=False)
    utility_plot.save(save_dir + 'utility.png')
    if SHOW_GRAPHS:
        utility_plot.show()
    production_costs_plot = sp.plotting.plot(example.ppf, (qx, 0, 200),
                                             show=False)
# state variables _s = sympy.Symbol("s", real=True, positive=True) _m = sympy.Symbol("m", real=True, positive=True) # fundamental equation _U = sympy.Symbol("U", real=True, positive=True) # equations of state _θ = sympy.Symbol("θ", real=True, positive=True) _π = sympy.Symbol("π", real=True, positive=True) _μ = sympy.Symbol("μ", real=True, positive=True) functionals = { # internal energy U(s, m) _U: 3 / (4 * sympy.pi) * sympy.exp(-sympy.Rational(5, 3)) * h**2 * _m_a**-sympy.Rational(8, 3) * _m**sympy.Rational(5, 3) * sympy.exp(sympy.Rational(2, 3) * _m_a / k_B * _s / _m), # temperature θ(s, m) _θ: sympy.exp(-sympy.Rational(5, 3)) / (2 * sympy.pi) * h**2 / k_B * _m_a**-sympy.Rational(5, 3) * _m**sympy.Rational(2, 3) * sympy.exp(sympy.Rational(2, 3) * _m_a / k_B * _s / _m), # pressure π(s, m) _π: sympy.exp(-sympy.Rational(5, 3)) / (2 * sympy.pi) * h**2 * _m_a**-sympy.Rational(8, 3) * _m**sympy.Rational(5, 3) * sympy.exp(sympy.Rational(2, 3) * _m_a / k_B * _s / _m), # chemical potential μ(s, m) _μ: sympy.exp(-sympy.Rational(5, 3)) / (4 * sympy.pi) * h**2 / k_B *
def diracEigenvalues(self, n):
    """Return the n-th eigenvalue (2n + 1)/2 as an exact rational number."""
    numerator = 2 * n + 1
    return sy.Rational(numerator, 2)
def prefactor(self, n):
    """Return the prefactor (n+2)! / (8 * pi**(3/2) * Gamma((3+2n)/2))."""
    # NOTE(review): `si.gamma` is passed the *string* '%d/%d' % (3+2n, 2) —
    # presumably the backend parses it as a fraction; confirm that `si`
    # accepts string arguments rather than requiring sy.Rational(3 + 2*n, 2).
    return sy.factorial(n + 2) / (8 * si.pi**(sy.Rational(3, 2)) *
                                  si.gamma('%d/%d' % (3 + 2 * n, 2)))
def GiRaFFE_NRPy_C2P(StildeD, BU, gammaDD, betaU, alpha):
    """Apply the GiRaFFE conservative-to-primitive fixes to StildeD and
    recompute the Valencia 3-velocity.

    Depending on the module parameters, this (1) enforces orthogonality of
    {\tilde S}_i to {\tilde B}^i, (2) enforces a Gamma-based speed limit on
    {\tilde S}_i, (3) recomputes ValenciavU from the fixed {\tilde S}_i, and
    (4) optionally enforces the current-sheet prescription on v^z near the
    z-plane.

    :param StildeD: densitized momentum {\tilde S}_i (rank-1, lower index);
        NOT modified — the fixed values are published as the global outStildeD.
    :param BU: magnetic field B^i (rank-1, upper index).
    :param gammaDD: spatial 3-metric (rank-2, lower indices).
    :param betaU: shift vector (rank-1, upper index).
    :param alpha: lapse (scalar).
    :return: None — results are exposed via the globals outStildeD and
        ValenciavU.
    """
    GRHD.compute_sqrtgammaDET(gammaDD)
    gammaUU, unusedgammadet = ixp.symm_matrix_inverter3x3(gammaDD)
    BtildeU = ixp.zerorank1()
    for i in range(3):
        # \tilde{B}^i = B^i \sqrt{\gamma}
        BtildeU[i] = GRHD.sqrtgammaDET * BU[i]

    BtildeD = ixp.zerorank1()
    for i in range(3):
        for j in range(3):
            BtildeD[j] += gammaDD[i][j] * BtildeU[i]

    Btilde2 = sp.sympify(0)
    for i in range(3):
        Btilde2 += BtildeU[i] * BtildeD[i]

    global outStildeD
    # BUGFIX: copy StildeD component-by-component instead of aliasing it.
    # The previous `outStildeD = StildeD` made the in-place updates below
    # (orthogonality and speed-limit enforcement) silently mutate the
    # caller's StildeD list — the same side effect the ADM converter in this
    # file explicitly copies its inputs to avoid.
    outStildeD = ixp.zerorank1()
    for i in range(3):
        outStildeD[i] = StildeD[i]

    # Then, enforce the orthogonality:
    if par.parval_from_str("enforce_orthogonality_StildeD_BtildeU"):
        StimesB = sp.sympify(0)
        for i in range(3):
            StimesB += StildeD[i] * BtildeU[i]

        for i in range(3):
            # {\tilde S}_i = {\tilde S}_i - ({\tilde S}_j {\tilde B}^j) {\tilde B}_i/{\tilde B}^2
            outStildeD[i] -= StimesB * BtildeD[i] / Btilde2

    # Calculate \tilde{S}^2:
    Stilde2 = sp.sympify(0)
    for i in range(3):
        for j in range(3):
            Stilde2 += gammaUU[i][j] * outStildeD[i] * outStildeD[j]

    # First we need to compute the factor f:
    # f = \sqrt{(1-\Gamma_{\max}^{-2}){\tilde B}^4/(16 \pi^2 \gamma {\tilde S}^2)}
    speed_limit_factor = sp.sqrt((sp.sympify(1)-GAMMA_SPEED_LIMIT**(-2.0))*Btilde2*Btilde2*sp.Rational(1,16)/\
                                 (M_PI*M_PI*GRHD.sqrtgammaDET*GRHD.sqrtgammaDET*Stilde2))

    import Min_Max_and_Piecewise_Expressions as noif

    # Calculate B^2
    B2 = sp.sympify(0)
    for i in range(3):
        for j in range(3):
            B2 += gammaDD[i][j] * BU[i] * BU[j]

    # Enforce the speed limit on StildeD:
    if par.parval_from_str("enforce_speed_limit_StildeD"):
        for i in range(3):
            outStildeD[i] *= noif.min_noif(sp.sympify(1), speed_limit_factor)

    global ValenciavU
    ValenciavU = ixp.zerorank1()
    # Recompute 3-velocity:
    for i in range(3):
        for j in range(3):
            # \bar{v}^i = 4 \pi \gamma^{ij} {\tilde S}_j / (\sqrt{\gamma} B^2)
            ValenciavU[i] += sp.sympify(4) * M_PI * gammaUU[i][j] * \
                outStildeD[j] / (GRHD.sqrtgammaDET * B2)

    # This number determines how far away (in grid points) we will apply the fix.
    grid_points_from_z_plane = par.Cparameters("REAL", thismodule,
                                               "grid_points_from_z_plane", 4.0)

    if par.parval_from_str("enforce_current_sheet_prescription"):
        # Calculate the drift velocity
        driftvU = ixp.zerorank1()
        for i in range(3):
            driftvU[i] = alpha * ValenciavU[i] - betaU[i]

        # The direct approach, used by the original GiRaFFE:
        # v^z = -(\gamma_{xz} v^x + \gamma_{yz} v^y) / \gamma_{zz}
        newdriftvU2 = -(gammaDD[0][2] * driftvU[0] +
                        gammaDD[1][2] * driftvU[1]) / gammaDD[2][2]
        # Now that we have the z component, it's time to substitute its Valencia form in.
        # Remember, we only do this if abs(z) < (k+0.01)*dz. Note that we add 0.01; this helps
        # avoid floating point errors and division by zero. This is the same as abs(z) - (k+0.01)*dz<0
        coord = nrpyAbs(rfm.xx[2])
        bound = (grid_points_from_z_plane + sp.Rational(1, 100)) * gri.dxx[2]
        ValenciavU[2] = noif.coord_leq_bound(coord,bound)*(newdriftvU2+betaU[2])/alpha \
            + noif.coord_greater_bound(coord,bound)*ValenciavU[2]
def __init__(self):
    """Build a randomized probability-table question.

    Constructs a quadratic in k whose single positive root is the answer,
    then randomly partitions the quadratic's coefficients across the table
    cells so that the cell values sum back to the original quadratic
    (plus 1, so the probabilities sum to 1 at the answer).

    NOTE(review): `k`, `coeff0`/`coeff1`/`coeff2` (used with .match, so
    presumably sympy Wild symbols) and `not_named_yet.partition` come from
    module scope — confirm against the module header.
    """
    self.num_lines, self.num_marks = 12, 3
    lb = random.randint(-2, -1)
    # table keys: four consecutive integers starting at a small negative lb
    keys = list(range(lb, lb + 4))
    self._qp = {}
    while True:
        # ensure we will have only one positive root for k
        pos_root_numerator = random.randint(1, 3)
        pos_root_denom = pos_root_numerator + random.randint(1, 2)
        neg_root_numerator = random.randint(3, 7)
        neg_root_denom = neg_root_numerator + random.randint(1, 2)
        quadratic = ((pos_root_denom * k - pos_root_numerator) * (neg_root_denom * k + neg_root_numerator) / neg_root_denom).expand()
        self._qp['quadratic_shrinkage_factor'] = neg_root_denom
        self._qp['quadratic_solutions'] = (
            sympy.Rational(pos_root_numerator, pos_root_denom),
            sympy.Rational(-neg_root_numerator, neg_root_denom)
        )
        # the positive root is the intended answer
        self._qp['answer'] = sympy.Rational(pos_root_numerator, pos_root_denom)
        # because we will be making sure that the sum of probabilities = 1, to preserve our pre-made solution we will add 1 to the quadratic
        quadratic += 1
        # now we want to take partitions of this quadratic (e.g. p^2 / 4, or (4p + 1) / 8 and have them be the values for the probability table)
        # let's say that we can either take a partition of the p^2 component, or of the (p^1 + p^0) component
        match = quadratic.match(coeff0 * k ** 2 + coeff1 * k + coeff2)
        if match[coeff1] == 0 or match[coeff2] == 0 or match[coeff0].p > 16 or match[coeff1].p > 16 or match[coeff2].p > 16:
            # we want non-zero coefficients for all powers of k, as well as small coefficients so the partition algorithm
            # doesn't take ages trying to compute them all
            continue
        # randomly partition the different parts of the quadratic to the different cells of the probability table
        # (.p is the sympy Rational numerator; only partitions shorter than the number of cells are usable)
        possible_square_partitions = [i for i in not_named_yet.partition(abs(match[coeff0].p), include_zero=True) if 1 < len(i) < len(keys)]
        possible_linear_partitions = [i for i in not_named_yet.partition(abs(match[coeff1].p), include_zero=True) if 1 < len(i) < len(keys)]
        possible_zeroth_partitions = [i for i in not_named_yet.partition(abs(match[coeff2].p), include_zero=True) if 1 < len(i) < len(keys)]
        p_square_partition = list(random.choice(possible_square_partitions))
        p_linear_partition = list(random.choice(possible_linear_partitions))
        p_zeroth_partition = list(random.choice(possible_zeroth_partitions))
        # pad each partition with zeros so there is one entry per table cell
        p_square_partition += [0] * (len(keys) - len(p_square_partition))
        p_linear_partition += [0] * (len(keys) - len(p_linear_partition))
        p_zeroth_partition += [0] * (len(keys) - len(p_zeroth_partition))
        random.shuffle(p_square_partition)
        random.shuffle(p_linear_partition)
        random.shuffle(p_zeroth_partition)
        # carry each coefficient's sign onto its partition entries
        if match[coeff0].could_extract_minus_sign():
            p_square_partition = [-i for i in p_square_partition]
        if match[coeff1].could_extract_minus_sign():
            p_linear_partition = [-i for i in p_linear_partition]
        if match[coeff2].could_extract_minus_sign():
            p_zeroth_partition = [-i for i in p_zeroth_partition]
        # one (square, linear, zeroth) numerator triple per table cell
        partitions = list(zip(p_square_partition, p_linear_partition, p_zeroth_partition))
        probabilities = [partition[0] * self._qp['answer'] ** 2 + partition[1] * self._qp['answer'] + partition[2] for partition in partitions]
        if not all([probability > 0 for probability in probabilities]):
            continue  # some probabilities are negative
        # reject any cell that received nothing from all three partitions
        if all([partition.count(0) != 3 for partition in partitions]):
            break
    # rebuild each cell value as a quadratic in k, restoring the original denominators (.q)
    values = [partition[0] * k ** 2 / match[coeff0].q + partition[1] * k / match[coeff1].q + sympy.Rational(partition[2], match[coeff2].q) for partition in partitions]
    self._qp['prob_table'] = OrderedDict(list(zip(keys, values)))
def set_zeroth_moment_relaxation_rate(self, relaxation_rate): e = sp.Rational(1, 1) prev_entry = self._cumulant_to_relaxation_info_dict[e] new_entry = RelaxationInfo(prev_entry[0], relaxation_rate) self._cumulant_to_relaxation_info_dict[e] = new_entry
# # License: BSD 3 clause """ Example of a nine velocities scheme for Navier-Stokes equations """ import sympy as sp import pyLBM X, Y, LA = sp.symbols('X, Y, LA') rho, qx, qy = sp.symbols('rho, qx, qy') rhoo, ux, uy = sp.symbols('rhoo, ux, uy') sigma_mu, sigma_eta = sp.symbols('sigma_mu, sigma_eta') rhoo_num = 1. la = 1. s3 = 1 / (sigma_mu + sp.Rational(1, 2)) s4 = s3 s5 = s4 s6 = s4 s7 = 1 / (sigma_eta + sp.Rational(1, 2)) s8 = s7 s = [0., 0., 0., s3, s4, s5, s6, s7, s8] dummy = 1 / (LA**2 * rhoo) qx2 = dummy * qx**2 qy2 = dummy * qy**2 q2 = qx2 + qy2 qxy = dummy * qx * qy dico = { 'parameters': {
def BSSN_RHSs():
    """Construct the symbolic right-hand sides of the BSSN evolution equations.

    Imports the (rescaled and unrescaled) BSSN variables and their derivative
    expressions from BSSN.BSSN_quantities (Bq) and the reference metric (rfm),
    then assembles \partial_t of: \bar{\gamma}_{ij}, \bar{A}_{ij}, cf (the
    conformal factor variable), K, and \bar{\Lambda}^i.

    Results are stored as module globals: cf_rhs, trK_rhs, Lambdabar_rhsU,
    LambdabarU_dupD, and the rescaled h_rhsDD, a_rhsDD, lambda_rhsU.
    """
    # Step 1.c: Given the chosen coordinate system, set up
    #           corresponding reference metric and needed
    #           reference metric quantities
    # The following function call sets up the reference metric
    #    and related quantities, including rescaling matrices ReDD,
    #    ReU, and hatted quantities.
    rfm.reference_metric()
    global have_already_called_BSSN_RHSs_function  # setting to global enables other modules to see updated value.
    have_already_called_BSSN_RHSs_function = True
    # Step 1.d: Set spatial dimension (must be 3 for BSSN, as BSSN is
    #           a 3+1-dimensional decomposition of the general
    #           relativistic field equations)
    DIM = 3
    # Step 1.e: Import all basic (unrescaled) BSSN scalars & tensors
    Bq.BSSN_basic_tensors()
    gammabarDD = Bq.gammabarDD
    AbarDD = Bq.AbarDD
    LambdabarU = Bq.LambdabarU
    trK = Bq.trK
    alpha = Bq.alpha
    betaU = Bq.betaU
    # Step 1.f: Import all needed rescaled BSSN tensors:
    cf = Bq.cf
    lambdaU = Bq.lambdaU
    # Step 2.a.i: Import derivative expressions for betaU defined in the BSSN.BSSN_quantities module:
    Bq.betaU_derivs()
    betaU_dD = Bq.betaU_dD
    betaU_dDD = Bq.betaU_dDD
    # Step 2.a.ii: Import derivative expression for gammabarDD
    Bq.gammabar__inverse_and_derivs()
    gammabarDD_dupD = Bq.gammabarDD_dupD
    # Step 2.a.iii: First term of \partial_t \bar{\gamma}_{i j} right-hand side:
    # \beta^k \bar{\gamma}_{ij,k} + \beta^k_{,i} \bar{\gamma}_{kj} + \beta^k_{,j} \bar{\gamma}_{ik}
    gammabar_rhsDD = ixp.zerorank2()
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                gammabar_rhsDD[i][j] += betaU[k] * gammabarDD_dupD[i][j][k] + betaU_dD[k][i] * gammabarDD[k][j] \
                                        + betaU_dD[k][j] * gammabarDD[i][k]
    # Step 2.b.i: First import \bar{A}_{ij} = AbarDD[i][j], and its contraction trAbar = \bar{A}^k_k
    #             from BSSN.BSSN_quantities
    Bq.AbarUU_AbarUD_trAbar_AbarDD_dD()
    trAbar = Bq.trAbar
    # Step 2.b.ii: Import detgammabar quantities from BSSN.BSSN_quantities:
    Bq.detgammabar_and_derivs()
    detgammabar = Bq.detgammabar
    detgammabar_dD = Bq.detgammabar_dD
    # Step 2.b.ii: Compute the contraction \bar{D}_k \beta^k = \beta^k_{,k} + \frac{\beta^k \bar{\gamma}_{,k}}{2 \bar{\gamma}}
    Dbarbetacontraction = sp.sympify(0)
    for k in range(DIM):
        Dbarbetacontraction += betaU_dD[k][k] + betaU[k] * detgammabar_dD[k] / (2 * detgammabar)
    # Step 2.b.iii: Second term of \partial_t \bar{\gamma}_{i j} right-hand side:
    # \frac{2}{3} \bar{\gamma}_{i j} \left (\alpha \bar{A}_{k}^{k} - \bar{D}_{k} \beta^{k}\right )
    for i in range(DIM):
        for j in range(DIM):
            gammabar_rhsDD[i][j] += sp.Rational(2, 3) * gammabarDD[i][j] * (alpha * trAbar - Dbarbetacontraction)
    # Step 2.c: Third term of \partial_t \bar{\gamma}_{i j} right-hand side:
    # -2 \alpha \bar{A}_{ij}
    for i in range(DIM):
        for j in range(DIM):
            gammabar_rhsDD[i][j] += -2 * alpha * AbarDD[i][j]
    # Step 3.a: First term of \partial_t \bar{A}_{i j}:
    # \beta^k \partial_k \bar{A}_{ij} + \partial_i \beta^k \bar{A}_{kj} + \partial_j \beta^k \bar{A}_{ik}
    # First define AbarDD_dupD:
    AbarDD_dupD = Bq.AbarDD_dupD  # From Bq.AbarUU_AbarUD_trAbar_AbarDD_dD()
    Abar_rhsDD = ixp.zerorank2()
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                Abar_rhsDD[i][j] += betaU[k] * AbarDD_dupD[i][j][k] + betaU_dD[k][i] * AbarDD[k][j] \
                                    + betaU_dD[k][j] * AbarDD[i][k]
    # Step 3.b: Second term of \partial_t \bar{A}_{i j}:
    # - (2/3) \bar{A}_{i j} \bar{D}_{k} \beta^{k} - 2 \alpha \bar{A}_{i k} {\bar{A}^{k}}_{j} + \alpha \bar{A}_{i j} K
    gammabarUU = Bq.gammabarUU  # From Bq.gammabar__inverse_and_derivs()
    AbarUD = Bq.AbarUD  # From Bq.AbarUU_AbarUD_trAbar()
    for i in range(DIM):
        for j in range(DIM):
            Abar_rhsDD[i][j] += -sp.Rational(2, 3) * AbarDD[i][j] * Dbarbetacontraction + alpha * AbarDD[i][j] * trK
            for k in range(DIM):
                Abar_rhsDD[i][j] += -2 * alpha * AbarDD[i][k] * AbarUD[k][j]
    # Step 3.c.i: Define partial derivatives of \phi in terms of evolved quantity "cf":
    Bq.phi_and_derivs()
    phi_dD = Bq.phi_dD
    phi_dupD = Bq.phi_dupD
    exp_m4phi = Bq.exp_m4phi
    phi_dBarD = Bq.phi_dBarD  # phi_dBarD = Dbar_i phi = phi_dD (since phi is a scalar)
    phi_dBarDD = Bq.phi_dBarDD  # phi_dBarDD = Dbar_i Dbar_j phi (covariant derivative)
    # Step 3.c.ii: Define RbarDD
    Bq.RicciBar__gammabarDD_dHatD__DGammaUDD__DGammaU()
    RbarDD = Bq.RbarDD
    # Step 3.c.iii: Define first and second derivatives of \alpha, as well as
    #               \bar{D}_i \bar{D}_j \alpha, which is defined just like phi
    alpha_dD = ixp.declarerank1("alpha_dD")
    alpha_dDD = ixp.declarerank2("alpha_dDD", "sym01")
    alpha_dBarD = alpha_dD
    alpha_dBarDD = ixp.zerorank2()
    GammabarUDD = Bq.GammabarUDD  # Defined in Bq.gammabar__inverse_and_derivs()
    for i in range(DIM):
        for j in range(DIM):
            alpha_dBarDD[i][j] = alpha_dDD[i][j]
            for k in range(DIM):
                alpha_dBarDD[i][j] += - GammabarUDD[k][i][j] * alpha_dD[k]
    # Step 3.c.iv: Define the terms in curly braces:
    curlybrackettermsDD = ixp.zerorank2()
    for i in range(DIM):
        for j in range(DIM):
            curlybrackettermsDD[i][j] = -2 * alpha * phi_dBarDD[i][j] + 4 * alpha * phi_dBarD[i] * phi_dBarD[j] \
                                        + 2 * alpha_dBarD[i] * phi_dBarD[j] \
                                        + 2 * alpha_dBarD[j] * phi_dBarD[i] \
                                        - alpha_dBarDD[i][j] + alpha * RbarDD[i][j]
    # Step 3.c.v: Compute the trace:
    curlybracketterms_trace = sp.sympify(0)
    for i in range(DIM):
        for j in range(DIM):
            curlybracketterms_trace += gammabarUU[i][j] * curlybrackettermsDD[i][j]
    # Step 3.c.vi: Third and final term of Abar_rhsDD[i][j]:
    for i in range(DIM):
        for j in range(DIM):
            Abar_rhsDD[i][j] += exp_m4phi * (curlybrackettermsDD[i][j] - sp.Rational(1, 3) * gammabarDD[i][j] * curlybracketterms_trace)
    # Step 4: Right-hand side of conformal factor variable "cf". Supported
    #         options include: cf=phi, cf=W=e^(-2*phi) (default), and cf=chi=e^(-4*phi)
    # \partial_t phi = \left[\beta^k \partial_k \phi \right] <- TERM 1
    #                + \frac{1}{6} \left (\bar{D}_{k} \beta^{k} - \alpha K \right ) <- TERM 2
    global cf_rhs
    cf_rhs = sp.Rational(1, 6) * (Dbarbetacontraction - alpha * trK)  # Term 2
    for k in range(DIM):
        cf_rhs += betaU[k] * phi_dupD[k]  # Term 1
    # Next multiply to convert phi_rhs to cf_rhs.
    if par.parval_from_str("BSSN.BSSN_quantities::EvolvedConformalFactor_cf") == "phi":
        pass  # do nothing; cf_rhs = phi_rhs
    elif par.parval_from_str("BSSN.BSSN_quantities::EvolvedConformalFactor_cf") == "W":
        cf_rhs *= -2 * cf  # cf_rhs = -2*cf*phi_rhs
    elif par.parval_from_str("BSSN.BSSN_quantities::EvolvedConformalFactor_cf") == "chi":
        cf_rhs *= -4 * cf  # cf_rhs = -4*cf*phi_rhs
    else:
        print("Error: EvolvedConformalFactor_cf == " + par.parval_from_str("BSSN.BSSN_quantities::EvolvedConformalFactor_cf") + " unsupported!")
        sys.exit(1)
    # Step 5: right-hand side of trK (trace of extrinsic curvature):
    # \partial_t K = \beta^k \partial_k K <- TERM 1
    #              + \frac{1}{3} \alpha K^{2} <- TERM 2
    #              + \alpha \bar{A}_{i j} \bar{A}^{i j} <- TERM 3
    #              - e^{-4 \phi} (\bar{D}_{i} \bar{D}^{i} \alpha + 2 \bar{D}^{i} \alpha \bar{D}_{i} \phi ) <- TERM 4
    global trK_rhs
    # TERM 2:
    trK_rhs = sp.Rational(1, 3) * alpha * trK * trK
    trK_dupD = ixp.declarerank1("trK_dupD")
    for i in range(DIM):
        # TERM 1:
        trK_rhs += betaU[i] * trK_dupD[i]
    for i in range(DIM):
        for j in range(DIM):
            # TERM 4:
            trK_rhs += -exp_m4phi * gammabarUU[i][j] * (alpha_dBarDD[i][j] + 2 * alpha_dBarD[j] * phi_dBarD[i])
    AbarUU = Bq.AbarUU  # From Bq.AbarUU_AbarUD_trAbar()
    for i in range(DIM):
        for j in range(DIM):
            # TERM 3:
            trK_rhs += alpha * AbarDD[i][j] * AbarUU[i][j]
    # Step 6: right-hand side of \partial_t \bar{\Lambda}^i:
    # \partial_t \bar{\Lambda}^i = \beta^k \partial_k \bar{\Lambda}^i - \partial_k \beta^i \bar{\Lambda}^k <- TERM 1
    #                            + \bar{\gamma}^{j k} \hat{D}_{j} \hat{D}_{k} \beta^{i} <- TERM 2
    #                            + \frac{2}{3} \Delta^{i} \bar{D}_{j} \beta^{j} <- TERM 3
    #                            + \frac{1}{3} \bar{D}^{i} \bar{D}_{j} \beta^{j} <- TERM 4
    #                            - 2 \bar{A}^{i j} (\partial_{j} \alpha - 6 \partial_{j} \phi) <- TERM 5
    #                            + 2 \alpha \bar{A}^{j k} \Delta_{j k}^{i} <- TERM 6
    #                            - \frac{4}{3} \alpha \bar{\gamma}^{i j} \partial_{j} K <- TERM 7
    # Step 6.a: Term 1 of \partial_t \bar{\Lambda}^i: \beta^k \partial_k \bar{\Lambda}^i - \partial_k \beta^i \bar{\Lambda}^k
    # First we declare \bar{\Lambda}^i and \bar{\Lambda}^i_{,j} in terms of \lambda^i and \lambda^i_{,j}
    global LambdabarU_dupD  # Used on the RHS of the Gamma-driving shift conditions
    LambdabarU_dupD = ixp.zerorank2()
    lambdaU_dupD = ixp.declarerank2("lambdaU_dupD", "nosym")
    for i in range(DIM):
        for j in range(DIM):
            LambdabarU_dupD[i][j] = lambdaU_dupD[i][j] * rfm.ReU[i] + lambdaU[i] * rfm.ReUdD[i][j]
    global Lambdabar_rhsU  # Used on the RHS of the Gamma-driving shift conditions
    Lambdabar_rhsU = ixp.zerorank1()
    for i in range(DIM):
        for k in range(DIM):
            Lambdabar_rhsU[i] += betaU[k] * LambdabarU_dupD[i][k] - betaU_dD[i][k] * LambdabarU[k]  # Term 1
    # Step 6.b: Term 2 of \partial_t \bar{\Lambda}^i = \bar{\gamma}^{jk} (Term 2a + Term 2b + Term 2c)
    # Term 2a: \bar{\gamma}^{jk} \beta^i_{,kj}
    Term2aUDD = ixp.zerorank3()
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                Term2aUDD[i][j][k] += betaU_dDD[i][k][j]
    # Term 2b: \hat{\Gamma}^i_{mk,j} \beta^m + \hat{\Gamma}^i_{mk} \beta^m_{,j}
    #          + \hat{\Gamma}^i_{dj}\beta^d_{,k} - \hat{\Gamma}^d_{kj} \beta^i_{,d}
    Term2bUDD = ixp.zerorank3()
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                for m in range(DIM):
                    Term2bUDD[i][j][k] += rfm.GammahatUDDdD[i][m][k][j] * betaU[m] \
                                          + rfm.GammahatUDD[i][m][k] * betaU_dD[m][j] \
                                          + rfm.GammahatUDD[i][m][j] * betaU_dD[m][k] \
                                          - rfm.GammahatUDD[m][k][j] * betaU_dD[i][m]
    # Term 2c: \hat{\Gamma}^i_{dj}\hat{\Gamma}^d_{mk} \beta^m - \hat{\Gamma}^d_{kj} \hat{\Gamma}^i_{md} \beta^m
    Term2cUDD = ixp.zerorank3()
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                for m in range(DIM):
                    for d in range(DIM):
                        Term2cUDD[i][j][k] += (rfm.GammahatUDD[i][d][j] * rfm.GammahatUDD[d][m][k] \
                                               - rfm.GammahatUDD[d][k][j] * rfm.GammahatUDD[i][m][d]) * betaU[m]
    Lambdabar_rhsUpieceU = ixp.zerorank1()
    # Put it all together to get Term 2:
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                Lambdabar_rhsU[i] += gammabarUU[j][k] * (Term2aUDD[i][j][k] + Term2bUDD[i][j][k] + Term2cUDD[i][j][k])
                Lambdabar_rhsUpieceU[i] += gammabarUU[j][k] * (
                    Term2aUDD[i][j][k] + Term2bUDD[i][j][k] + Term2cUDD[i][j][k])
    # Step 6.c: Term 3 of \partial_t \bar{\Lambda}^i:
    #    \frac{2}{3} \Delta^{i} \bar{D}_{j} \beta^{j}
    DGammaU = Bq.DGammaU  # From Bq.RicciBar__gammabarDD_dHatD__DGammaUDD__DGammaU()
    for i in range(DIM):
        Lambdabar_rhsU[i] += sp.Rational(2, 3) * DGammaU[i] * Dbarbetacontraction  # Term 3
    # Step 6.d: Term 4 of \partial_t \bar{\Lambda}^i:
    #           \frac{1}{3} \bar{D}^{i} \bar{D}_{j} \beta^{j}
    detgammabar_dDD = Bq.detgammabar_dDD  # From Bq.detgammabar_and_derivs()
    Dbarbetacontraction_dBarD = ixp.zerorank1()
    for k in range(DIM):
        for m in range(DIM):
            Dbarbetacontraction_dBarD[m] += betaU_dDD[k][k][m] + \
                                            (betaU_dD[k][m] * detgammabar_dD[k] + betaU[k] * detgammabar_dDD[k][m]) / (2 * detgammabar) \
                                            - betaU[k] * detgammabar_dD[k] * detgammabar_dD[m] / (
                                                2 * detgammabar * detgammabar)
    for i in range(DIM):
        for m in range(DIM):
            Lambdabar_rhsU[i] += sp.Rational(1, 3) * gammabarUU[i][m] * Dbarbetacontraction_dBarD[m]
    # Step 6.e: Term 5 of \partial_t \bar{\Lambda}^i:
    #           - 2 \bar{A}^{i j} (\partial_{j} \alpha - 6 \alpha \partial_{j} \phi)
    for i in range(DIM):
        for j in range(DIM):
            Lambdabar_rhsU[i] += -2 * AbarUU[i][j] * (alpha_dD[j] - 6 * alpha * phi_dD[j])
    # Step 6.f: Term 6 of \partial_t \bar{\Lambda}^i:
    #           2 \alpha \bar{A}^{j k} \Delta^{i}_{j k}
    DGammaUDD = Bq.DGammaUDD  # From RicciBar__gammabarDD_dHatD__DGammaUDD__DGammaU()
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                Lambdabar_rhsU[i] += 2 * alpha * AbarUU[j][k] * DGammaUDD[i][j][k]
    # Step 6.g: Term 7 of \partial_t \bar{\Lambda}^i:
    #           -\frac{4}{3} \alpha \bar{\gamma}^{i j} \partial_{j} K
    trK_dD = ixp.declarerank1("trK_dD")
    for i in range(DIM):
        for j in range(DIM):
            Lambdabar_rhsU[i] += -sp.Rational(4, 3) * alpha * gammabarUU[i][j] * trK_dD[j]
    # Step 7: Rescale the RHS quantities so that the evolved
    #         variables are smooth across coord singularities
    global h_rhsDD, a_rhsDD, lambda_rhsU
    h_rhsDD = ixp.zerorank2()
    a_rhsDD = ixp.zerorank2()
    lambda_rhsU = ixp.zerorank1()
    for i in range(DIM):
        lambda_rhsU[i] = Lambdabar_rhsU[i] / rfm.ReU[i]
        for j in range(DIM):
            h_rhsDD[i][j] = gammabar_rhsDD[i][j] / rfm.ReDD[i][j]
            a_rhsDD[i][j] = Abar_rhsDD[i][j] / rfm.ReDD[i][j]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Nov 17 09:42:30 2018 @author: xsxsz """ import sympy as sy sy.init_printing(use_unicode=True, use_latex=True) x, y = sy.symbols('x y') expr = x + y**2 latex = sy.latex(expr) print(latex) print('----------') print(sy.solve(x + 1, x)) print('----------') print(sy.limit(sy.sin(x) / x, x, 0)) print('----------') print(sy.limit(sy.sin(x) / x, x, sy.oo)) print('----------') print(sy.limit(sy.ln(x + 1) / x, x, 0)) print('----------') print(sy.integrate(sy.sin(x), (x, -sy.oo, sy.oo))) print('----------') print(sy.integrate(1 / x, (x, 1, 2))) print('----------') print(sy.Rational(1, 2)) print('----------')
def getFixedStats(model_name,data_dir,D_protein_costs,USE_COLUMN_CROSSCHECK=True,IS_FVA=False):
    """Collect statistics on the fixed (non-variable) fluxes of a model.

    Input:
     - *model_name* (string)
     - *data_dir* (string) root directory holding models/cope_fba/flux_modules trees
     - *D_protein_costs* (dict) reaction id -> protein cost (may be empty/falsy)
     - *USE_COLUMN_CROSSCHECK* (boolean) [default = True]
     - *IS_FVA* (boolean) [default = False] recompute fixed-flux stats from FVA output

    Returns (fixed_nfluxes, nvariable_fluxes, fixed_cost, fixed_sumAbsFluxes,
    variable_IO, overal_IO).
    """
    model_filename = '{0:s}.xml'.format(model_name)
    sbml_dir = os.path.join(data_dir,'models','sbml')
    vertex_dir = os.path.join(data_dir,'cope_fba','whole_model','vertex')
    fluxmodules_dir = os.path.join(data_dir,'flux_modules')
    H_format_dir = os.path.join(data_dir,'models','h-format')
    # Try SBML3/FBC first, falling back to SBML2/FBA on any read error.
    # (Narrowed from a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.)
    try:
        cmod = cbm.CBRead.readSBML3FBC(model_filename,sbml_dir)
    except Exception:
        cmod = cbm.CBRead.readSBML2FBA(model_filename,sbml_dir)
    # analyzeModel is called for its side effects on cmod; the LP object is kept
    # for parity with the original code even though it is unused here.
    lp = cbm.CBSolver.analyzeModel(cmod,return_lp_obj = True)
    if USE_COLUMN_CROSSCHECK:
        ColumnCrossCheck(cmod,model_name,H_format_dir)  # Perform column cross check to make sure that identifiers match
    L_arne_module_r_ids = ParseFluxModules('modules.txt',fluxmodules_dir)
    nvariable_fluxes = len(L_arne_module_r_ids)
    fixed_cost=0
    fixed_sumAbsFluxes=0
    fixed_nfluxes=0
    L_fixed_r_ids = []
    L_variable_r_ids = []
    D_fluxes = ParseRationalFBA(model_name,H_format_dir)
    for r_id in D_fluxes:
        if r_id not in L_arne_module_r_ids: # == fixed
            J_r = D_fluxes[r_id]
            if J_r != 0:
                L_fixed_r_ids.append(r_id)
                fixed_nfluxes+=1
                fixed_sumAbsFluxes+=abs(J_r)
                if D_protein_costs:
                    fixed_cost+=abs(J_r)*D_protein_costs[r_id]
        else:
            L_variable_r_ids.append(r_id)
    if IS_FVA:
        try:
            columns_in = open(os.path.join(H_format_dir,'{0:s}.noinf_r.columns.txt'.format(model_name)))
            fva_in = open(os.path.join(vertex_dir,'{0:s}.noinf_r.ine.opt.fva'.format(model_name)))
        except Exception as er:
            print(er)
            sys.exit()
        nJv=0
        nJf=0
        nJnf=0
        # Recompute the fixed-flux statistics from the FVA output instead.
        fixed_cost=0
        fixed_sumAbsFluxes=0
        for col,fva in zip(columns_in,fva_in):
            J_r = sympy.Rational(fva.split(':')[1])
            r_id = col.split(',')[1].strip()
            if 'FIXED' in fva:
                assert r_id not in L_arne_module_r_ids, "Error: Unexpected behavior, fixed reactions cannot be part of a 'Fluxmodule'!"
                if J_r != 0:
                    nJf+=1
                    fixed_sumAbsFluxes+=abs(J_r)
                    if D_protein_costs:
                        fixed_cost+=abs(J_r)*D_protein_costs[r_id]
                else:
                    nJnf += 1
            elif 'VARIABLE' in fva:
                nJv+=1
        # Close the FVA input files (the original leaked both handles).
        columns_in.close()
        fva_in.close()
        assert nvariable_fluxes == nJv, "Error: Unexpected behavior, number of variable fluxes must be equal to the number of fluxes detected in all FluxModules together!"
        # Fixed copy-pasted message: this assertion checks the FIXED flux count.
        assert fixed_nfluxes == nJf, "Error: Unexpected behavior, number of fixed fluxes must be equal to the number of nonzero fixed fluxes detected in the FVA output!"
    variable_IO = tools.getApproximateInputOutputRelationship(cmod,L_variable_r_ids,D_fluxes,isboundary=True)
    overal_IO = tools.getApproximateInputOutputRelationship(cmod, sorted(D_fluxes),D_fluxes,isboundary=True)
    return fixed_nfluxes,nvariable_fluxes,float(fixed_cost),float(fixed_sumAbsFluxes),variable_IO,overal_IO
a, dt, I, n = sym.symbols('a dt I n') u = sym.Function('u') f = u(n+1) - u(n) + dt*a*u(n+1) sym.rsolve(f, u(n), {u(0): I}) # However, 0 is the answer! # Experimentation shows that we cannot have symbols dt, a in the # recurrence equation, just n or numbers. # Even if we worked with scaled equations, dt is in there, # rsolve cannot be applied. """ # Numerical amplification factor theta = sym.Symbol('theta') A = (1-(1-theta)*p)/(1+theta*p) half = sym.Rational(1,2) # Interactive session for demonstrating subs A.subs(theta, 1) # A for Backward Euler A.subs(theta, half) # Crank-Nicolson A.subs(theta, 0).series(p, 0, 4) # Taylor-expanded A for Forward Euler A.subs(theta, 1).series(p, 0, 4) # Taylor-expanded A for Backward Euler A.subs(theta, half).series(p, 0, 4) # Taylor-expanded A for C-N A_e.series(p, 0, 4) # Taylor-expanded exact A # Error in amplification factors FE = A_e.series(p, 0, 4) - A.subs(theta, 0).series(p, 0, 4) BE = A_e.series(p, 0, 4) - A.subs(theta, 1).series(p, 0, 4) CN = A_e.series(p, 0, 4) - A.subs(theta, half).series(p, 0, 4) FE BE CN
def compute_u0_smallb_Poynting__Cartesian(gammaDD=None, betaU=None, alpha=None, ValenciavU=None, BU=None):
    """Compute u^0, the magnetic 4-vector b^mu, and the Poynting vector S^i.

    Inputs (all optional; generic symbolic versions are declared if omitted):
      gammaDD    -- spatial 3-metric gamma_{ij}
      betaU      -- shift vector beta^i
      alpha      -- lapse
      ValenciavU -- Valencia 3-velocity v^i_{(n)}
      BU         -- magnetic field B^i

    Results are set as module globals: g4DD, computeu0_Cfunction, u0, uD,
    uBcontraction, uU, smallb4U, g4UU, smallb4D, smallb2etk, PoynSU.
    """
    if gammaDD is None:  # identity test for the "uninitialized" sentinel
        # Declare these generically if uninitialized.
        gammaDD = ixp.declarerank2("gammaDD", "sym01")
        betaU = ixp.declarerank1("betaU")
        alpha = sp.sympify("alpha")
        ValenciavU = ixp.declarerank1("ValenciavU")
        BU = ixp.declarerank1("BU")

    # Set spatial dimension = 3
    DIM = 3

    thismodule = "smallbPoynET"

    # Step 1: Build the 4-metric g_{mu nu} from the ADM 3+1 variables,
    #         using Eqs. 2.121-2.122 in Baumgarte & Shapiro.
    # Eq. 2.121: beta_i = gamma_{ij} beta^j
    betaD = ixp.zerorank1()
    for i in range(DIM):
        for j in range(DIM):
            betaD[i] += gammaDD[i][j] * betaU[j]

    # beta^2 = beta^i beta_i
    beta2 = sp.sympify(0)
    for i in range(DIM):
        beta2 += betaU[i] * betaD[i]

    # Eq. 2.122: g_{00} = -alpha^2 + beta^2; g_{0i} = beta_i; g_{ij} = gamma_{ij}
    global g4DD
    g4DD = ixp.zerorank2(DIM=4)
    g4DD[0][0] = -alpha**2 + beta2
    for i in range(DIM):
        g4DD[i + 1][0] = g4DD[0][i + 1] = betaD[i]
    for i in range(DIM):
        for j in range(DIM):
            g4DD[i + 1][j + 1] = gammaDD[i][j]

    # Step 2: Compute u^0 from the Valencia 3-velocity (Eqs. 9-11 of the
    # IllinoisGRMHD paper, arXiv:1501.07276):
    #   (alpha u^0)^2 = 1/(1 - R), where R = gamma_{ij} v^i_{(n)} v^j_{(n)}.
    # Numerical error can push R past its physical ceiling, so when
    # R > Rmax = 1 - 1/Gamma_max the 3-velocity is rescaled by sqrt(Rmax/R)
    # before evaluating u^0 = 1/(alpha sqrt(1 - R)).
    R = sp.sympify(0)
    for i in range(DIM):
        for j in range(DIM):
            R += gammaDD[i][j] * ValenciavU[i] * ValenciavU[j]

    GAMMA_SPEED_LIMIT = par.Cparameters("REAL", thismodule, "GAMMA_SPEED_LIMIT")
    Rmax = 1 - 1 / GAMMA_SPEED_LIMIT

    rescaledValenciavU = ixp.zerorank1()
    for i in range(DIM):
        rescaledValenciavU[i] = ValenciavU[i] * sp.sqrt(Rmax / R)
    rescaledu0 = 1 / (alpha * sp.sqrt(1 - Rmax))
    regularu0 = 1 / (alpha * sp.sqrt(1 - R))

    global computeu0_Cfunction
    computeu0_Cfunction = "/* Function for computing u^0 from Valencia 3-velocity. */\n"
    computeu0_Cfunction += "/* Inputs: ValenciavU[], alpha, gammaDD[][], GAMMA_SPEED_LIMIT (C parameter) */\n"
    computeu0_Cfunction += "/* Output: u0=u^0 and velocity-limited ValenciavU[] */\n\n"
    computeu0_Cfunction += outputC(
        [R, Rmax], ["const double R", "const double Rmax"],
        "returnstring",
        params="includebraces=False,CSE_varprefix=tmpR,outCverbose=False")
    computeu0_Cfunction += "if(R <= Rmax) "
    computeu0_Cfunction += outputC(
        regularu0, "u0", "returnstring",
        params="includebraces=True,CSE_varprefix=tmpnorescale,outCverbose=False")
    computeu0_Cfunction += " else "
    computeu0_Cfunction += outputC(
        [rescaledValenciavU[0], rescaledValenciavU[1], rescaledValenciavU[2], rescaledu0],
        ["ValenciavU0", "ValenciavU1", "ValenciavU2", "u0"],
        "returnstring",
        params="includebraces=True,CSE_varprefix=tmprescale,outCverbose=False")

    # Step 3: Compute u_j from u^0, the Valencia 3-velocity, and g_{mu nu}:
    #   u_j = g_{mu j} u^mu = u^0 (beta_j + gamma_{ij}(alpha v^i_{(n)} - beta^i))
    #       = alpha u^0 gamma_{ij} v^i_{(n)}
    global u0
    u0 = par.Cparameters("REAL", thismodule, "u0")
    global uD
    uD = ixp.zerorank1()
    for i in range(DIM):
        for j in range(DIM):
            # BUGFIX: the contraction is over i, so the velocity factor must be
            # ValenciavU[i]. The original used ValenciavU[j], which yields
            # v_j * Sum_i gamma_{ij} rather than gamma_{ij} v^i.
            uD[j] += alpha * u0 * gammaDD[i][j] * ValenciavU[i]

    # Step 4: Compute gamma = gammaDET from the ADM 3+1 variables, via the
    # symmetric matrix inversion function in indexedexp.py:
    gammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)

    # Step 5: Compute b^mu from the above expressions:
    #   sqrt(4 pi) b^0 = B^0_(u) = u_j B^j / alpha
    #   sqrt(4 pi) b^i = B^i_(u) = (B^i + (u_j B^j) u^i) / (alpha u^0)
    M_PI = par.Cparameters("REAL", thismodule, "M_PI")

    # uBcontraction = u_i B^i
    global uBcontraction
    uBcontraction = sp.sympify(0)
    for i in range(DIM):
        uBcontraction += uD[i] * BU[i]

    # uU = 3-vector representing u^i = u^0 (alpha v^i_{(n)} - beta^i)
    global uU
    uU = ixp.zerorank1()
    for i in range(DIM):
        uU[i] = u0 * (alpha * ValenciavU[i] - betaU[i])

    global smallb4U
    smallb4U = ixp.zerorank1(DIM=4)
    smallb4U[0] = uBcontraction / (alpha * sp.sqrt(4 * M_PI))
    for i in range(DIM):
        smallb4U[1 + i] = (BU[i] + uBcontraction * uU[i]) / (alpha * u0 * sp.sqrt(4 * M_PI))

    # Part 2 of 2: the Poynting flux vector, Eq. 11 of Kelly et al.
    # (arXiv:1710.02132):
    #   S^i = -alpha T^i_{EM 0} = alpha (b^2 u^i u_0 + (1/2) b^2 g^i_0 - b^i b_0)
    # Inverse 4-metric, Eq. 4.49 in Gourgoulhon (gr-qc/0703035):
    #   g^{00} = -1/alpha^2; g^{0i} = beta^i/alpha^2;
    #   g^{ij} = gamma^{ij} - beta^i beta^j / alpha^2
    global g4UU
    g4UU = ixp.zerorank2(DIM=4)
    g4UU[0][0] = -1 / alpha**2
    for i in range(DIM):
        g4UU[0][i + 1] = g4UU[i + 1][0] = betaU[i] / alpha**2
    for i in range(DIM):
        for j in range(DIM):
            g4UU[i + 1][j + 1] = gammaUU[i][j] - betaU[i] * betaU[j] / alpha**2

    # Step 2a: compute the mixed metric g^mu_delta = g^{mu nu} g_{nu delta}
    g4UD = ixp.zerorank2(DIM=4)
    for mu in range(4):
        for delta in range(4):
            for nu in range(4):
                g4UD[mu][delta] += g4UU[mu][nu] * g4DD[nu][delta]

    # Step 2b: compute b_mu = g_{mu nu} b^nu
    global smallb4D
    smallb4D = ixp.zerorank1(DIM=4)
    for mu in range(4):
        for nu in range(4):
            smallb4D[mu] += g4DD[mu][nu] * smallb4U[nu]

    # Step 2c: compute u_0 = g_{mu 0} u^{mu} = g4DD[0][0]*u0 + g4DD[i][0]*uU[i]
    u_0 = g4DD[0][0] * u0
    for i in range(DIM):
        u_0 += g4DD[i + 1][0] * uU[i]

    # Step 2d: compute b^2 = b^mu b_mu
    global smallb2etk
    smallb2etk = sp.sympify(0)
    for mu in range(4):
        smallb2etk += smallb4U[mu] * smallb4D[mu]

    # Step 2e: compute S^i
    global PoynSU
    PoynSU = ixp.zerorank1()
    for i in range(DIM):
        PoynSU[i] = -alpha * (smallb2etk * uU[i] * u_0 + sp.Rational(1, 2) * smallb2etk * g4UD[i + 1][0] - smallb4U[i + 1] * smallb4D[0])
def __init__(self, **kwargs): if 'seed' in kwargs: self.seed = kwargs['seed'] else: self.seed = random.random() random.seed(self.seed) x = sy.Symbol('x') self.x = x degree = random.choice([3,4]) #degree = 4 zeroes = [] force_duplicate = random.choice([True, False]) # force_duplicate = True def have_duplicate(a): return len(a) != len(set(a)) if force_duplicate: random.seed(self.seed) while not(have_duplicate(zeroes)): zeroes = [] for i in range(degree): z = random.randint(-5,5) while any([abs(z-r) in [1,2] for r in zeroes]): z = random.randint(-5,5) zeroes.append(z) else: random.seed(self.seed) for i in range(degree): z = random.randint(-5,5) while any([abs(z-r) in [1,2] for r in zeroes]): z = random.randint(-5,5) zeroes.append(z) # print(zeroes) # random.seed(self.seed) y_0 = random_non_zero_integer(-9,9) prod = 1 x_0 = 0 while x_0 in zeroes: x_0 = random_non_zero_integer(-5,5) for z in zeroes: prod *= (x_0-z) LC = sy.Rational(y_0, prod) def f(x): out = LC for i in range(degree): out *= (x-zeroes[i]) return out self.answer = f(x) # print('answer', self.answer) expr = sy.simplify(1/LC*f(x)) self.as_lambda = sy.lambdify(x, f(x)) # f = self.as_lambda self.given = sy.factor(f(x)) # self.format_given = f'\\(f(x) = {sy.latex(LC)}{sy.latex(expr)}\\)' self.format_answer = f'\\(f(x) = {sy.latex(LC)}{sy.latex(expr)}\\)' self.prompt_single = f"""The graph is that of either a degree 3 or degree 4 polynomial. Give an equation for the graph. Note that the graph passes through the point \\({sy.latex((x_0, y_0))}\\)""" self.further_instruction = """Write 'f(x) =' or 'y = ' and then and expression for your function. """ self.format_given_for_tex = f""" {self.prompt_single} """ points = [[z, 0] for z in zeroes] points += [[x_0, y_0]] self.points = points poly_points = self.get_svg_data([-10,10]) self.format_given = f"""
def rationalE():
    """Generate a random rational for the structure matrix.

    Returns a sympy Rational equal to 1, 2, or 3 (uniformly).
    """
    # randrange(1, 4) consumes the same underlying draw as 1 + randrange(3),
    # so the random stream and the resulting value are unchanged.
    return sp.Rational(randrange(1, 4))
# --- Express the BCH (Baker-Campbell-Hausdorff) expansion in terms of a
#     precomputed Hall/Lyndon-style basis read from "niats_base.txt". ---
# Bucket each term of the BCH expression by its homogeneous degree in A, B.
bchvec = [sp.S(0)] * n
for arg in bchexpr.args:
    bchvec[countDim(arg, A, B)] += arg
# E[i] holds the basis elements of degree i (1-indexed within each sublist).
E = llegir_base("niats_base.txt", A, B)
print()
printi("BCH en termes dels elements de la base")
t0 = tm.time()
for i in range(1, len(bchvec)):
    # Unknown coefficients s1..s_{len(E[i])-1}, one per basis element of degree i.
    c = sp.symbols("s1:" + str(len(E[i])))
    cesq = sp.S(0)
    for m in range(1, len(E[i])):
        cesq += c[m - 1] * E[i][m]
    cesq = cesq.doit().expand()
    # Solve the linear system matching the ansatz against the degree-i BCH part.
    sol = resoldre(cesq, bchvec[i])
    cad = ""
    nt = 0
    for m in sol:
        if sol[m] != 0:
            # Re-normalize each coefficient into an explicit Rational for printing.
            nume, deno = sp.fraction(sol[m])
            frac = sp.Rational(nume, deno)
            if frac > 0:
                cad += "+"  # explicit '+' so the printed sum reads naturally
            cad += str(frac) + "*E[" + str(i) + "][" + str(c.index(m) + 1) + "]"
            # cad += str(frac) + "*" + str(E[i][c.index(m) + 1])
            nt += 1
    # Print: degree(number-of-nonzero-terms) -> linear combination.
    print(str(i) + "(" + str(nt) + ") ->", cad)
t1 = tm.time()
print(t1 - t0, "s")
def rationalQ():
    """Generate a random rational for the moment matrix.

    Returns a sympy Rational ``num/denom`` with ``num`` in [0, 99] and
    ``denom`` in [1, 99].

    Fix: the original drew ``denom = randrange(100)``, which can return 0
    and makes ``sp.Rational(num, 0)`` undefined (division by zero / zoo).
    The denominator is now guaranteed nonzero.
    """
    num = randrange(100)
    denom = 1 + randrange(99)  # never zero
    return sp.Rational(num, denom)
def balance(eq):
    """Balance a chemical equation given as a string.

    INPUT:  a string such as 'H2+O2=H2O' (operands separated by '+', with a
            single '=' between reactants and products).
    OUTPUT: the balanced equation as a string, e.g. '2H2+O2 = 2H2O'.

    Strategy: build one linear conservation equation per chemical element
    (reactant counts positive, product counts negative), solve the resulting
    homogeneous system with sympy, then scale the solution to the smallest
    integer coefficients.
    """
    linear_system = []
    OPERANDS = []
    # Index of '=' splits reactants (before) from products (after).
    middle = (re.search('=', eq)).start()
    operand_indices = []
    operands_in_eq = re.findall("[a-zA-Z0-9]+", eq)
    for i in operands_in_eq:
        # NOTE(review): re.search finds the FIRST occurrence, so a repeated
        # operand string would get the same index twice — confirm inputs
        # never repeat an operand.
        operand_indices.append(re.search(i, eq).start())
    elements_in_eq = re.findall("[A-Z][a-z]*", eq)
    elements = dict()
    # Collect the distinct element symbols (value is unused; dict acts as a set).
    for i in range(len(elements_in_eq)):
        try:
            holder = elements[elements_in_eq[i]]
        except KeyError:
            elements[elements_in_eq[i]] = 0
    # Parse each operand into {element: count}.
    for i in range(len(operands_in_eq)):
        elements_in_operand = re.findall("[A-Z][a-z]*[0-9]*", operands_in_eq[i])
        operand_contents = dict()
        for j in elements_in_operand:
            count = re.findall("[0-9]+", j)
            try:
                # Element seen again without a subscript: bump its count.
                numhold = operand_contents[j]
                operand_contents[j] = numhold + 1
            except KeyError:
                if count == []:
                    operand_contents[j] = 1
                else:
                    # Strip the trailing digit(s) from the key and store the count.
                    # NOTE(review): this slice drops only ONE trailing character,
                    # so multi-digit subscripts would leave digits in the key —
                    # confirm against expected inputs.
                    operand_contents[j[0:len(j) - 1]] = int(count[0])
        OPERANDS.append(Stoich_Operand(operands_in_eq[i], operand_contents, operand_indices[i]))
    # One pass per element: build its conservation row and append to the system.
    for i in elements:
        linear_equation = []
        for j in OPERANDS:
            try:
                numhold = j.operand_contents[i]
                # Products (right of '=') contribute negatively.
                if j.operand_index > middle:
                    numhold *= -1
                linear_equation.append(numhold)
            except KeyError:
                linear_equation.append(0)
        # Augment with the zero right-hand side (homogeneous equation).
        linear_equation.append(0)
        raw_eq_solve = ""
        n = len(linear_equation)
        x = [parse_expr('x%d' % i) for i in range(n)]
        # Build "c0*x0 + c1*x1 + ..." textually, then parse and solve it.
        for j in range(len(linear_equation)):
            raw_eq_solve = raw_eq_solve + str(linear_equation[j]) + "*" + str(x[j]) + "+"
        parse_eq_solve = parse_expr(raw_eq_solve[0:len(raw_eq_solve) - 1])
        dict_eq_solve = sp.solve(parse_eq_solve, *x)
        normalize_x = dict()
        exp_equations = []
        exp_equations_args = []
        for j in x:
            normalize_x[j] = 1
        # Turn each solved relation x_j = expr into "x_j - (expr)" and collect
        # the argument tuples of its terms for coefficient extraction below.
        for j in x:
            try:
                value = str(dict_eq_solve[0][j])
                equat = str(j) + " - (" + value + ")"
                exp_equat = parse_expr(equat)
                exp_equations.append(exp_equat)
                b = len(exp_equat.args)
                for n in range(b):
                    exp_equations_args.append(exp_equat.args[n].args)
            except KeyError:
                continue
        # Reconstruct a normalized coefficient row from the solved expression.
        linear_equation = []
        for j in x:
            var_present = re.findall(str(j), str(exp_equations[0]))
            if var_present == []:
                linear_equation.append(0)
            else:
                var_present_args = 0
                for p in exp_equations_args:
                    try:
                        # Term of the form coeff*x_j: record its coefficient.
                        if p[len(p) - 1] == j:
                            var_present_args = 1
                            linear_equation.append(p[0])
                    except IndexError:
                        continue
                # Bare x_j (no explicit coefficient) counts as 1.
                if var_present_args == 0:
                    linear_equation.append(1)
        linear_system.append(linear_equation)
    # Solve the assembled (augmented) system for the stoichiometric unknowns.
    M = sp.Matrix(linear_system)
    n = len(linear_system[0])
    x = [parse_expr('x%d' % i) for i in range(n)]
    sols = sp.solve_linear_system(M, *x)
    coefficients = dict()
    for i in x:
        coefficients[i] = 1
    for key in sols:
        try:
            # Take the leading factor of each solution (free variable set to 1).
            coefficients[key] = (sols[key]).args[0]
        except IndexError:
            coefficients[key] = 1
    # Clear denominators: multiply by the lcm of all denominators so every
    # coefficient becomes an integer.
    L_Denoms = []
    for i in coefficients:
        L_Denoms.append(sp.fraction(sp.Rational(coefficients[i]))[1])
    multiplier = sp.lcm(L_Denoms)
    for i in coefficients:
        coefficients[i] = coefficients[i] * multiplier
    # print coefficients
    # Reassemble the equation string, omitting coefficient 1 and inserting
    # ' = ' at the reactant/product boundary.
    balanced_Stoich = ""
    equals_placed = 0
    for i in range(len(x) - 1):
        if coefficients[x[i]] == 1:
            balanced_Stoich += str(OPERANDS[i].operand)
        else:
            balanced_Stoich += str(coefficients[x[i]]) + str(OPERANDS[i].operand)
        if equals_placed == 0:
            if i < len(x) - 2:
                if OPERANDS[i + 1].operand_index > middle:
                    balanced_Stoich += " = "
                    equals_placed = 1
                else:
                    balanced_Stoich += " + "
            else:
                balanced_Stoich += " + "
        else:
            balanced_Stoich += " + "
    # Drop the trailing separator (' + ' or ' = ') appended after the last operand.
    return balanced_Stoich[0:len(balanced_Stoich) - 3]
def _reciprocate_integer(i,useRational=None): # XXX this is never used if useRational is None: useRational=_defaultUseRational if useRational: return sympy.Rational(1,i) return 1.0/i
def plot_kepler_heisenberg_dynamics ():
    """Integrate and plot the Kepler-Heisenberg problem in Cartesian Darboux
    coordinates, sweeping a grid of initial conditions on the H == 0 energy
    surface and writing one plot (plus pickle) per initial condition."""
    # Symbolic phase-space coordinates: positions q = (x,y,z), momenta p.
    x, y, z, p_x, p_y, p_z = sp.var('x, y, z, p_x, p_y, p_z')
    q = np.array([x, y, z])
    p = np.array([p_x, p_y, p_z])
    # These are Darboux coordinates on T^{*} R^3
    qp = np.array([q, p])
    vorpy.symplectic.validate_darboux_coordinates_quantity_or_raise(qp)
    # Kinetic term built from the Heisenberg-group momenta P_x, P_y.
    P_x = p_x - y*p_z/2
    P_y = p_y + x*p_z/2
    K = (P_x**2 + P_y**2)/2
    r_squared = x**2 + y**2
    # Kepler-Heisenberg potential.
    U = -1 / (8*sp.pi*sp.sqrt(r_squared**2 + 16*z**2))
    H = K + U
    # Hamiltonian vector field X and its differential DX (for variational flow).
    X = vorpy.symplectic.symplectic_gradient_of(H, qp)
    DX = vorpy.symbolic.differential(X, qp)
    # Phase space has shape (2,3), so if F is a time-t flow map, then DF is a matrix with shape (2,3,2,3).
    J = vorpy.symbolic.tensor('J', qp.shape+qp.shape)
    # Make the symplectomorphicity_condition a scalar.
    S_cond = vorpy.symplectic.symplectomorphicity_condition(J, dtype=sp.Integer, return_as_scalar_if_possible=True)
    S = sp.sqrt(np.sum(np.square(S_cond))).simplify()
    print(f'H = {H}')
    print(f'X = {X}')
    print(f'DX = {DX}')
    # Textual substitutions applied when lambdifying symbolic expressions into
    # fast numpy-backed callables.
    replacement_d = {
        'array':'np.array',
        'cos':'np.cos',
        'sin':'np.sin',
        'sqrt':'np.sqrt',
        'pi':'np.pi',
        'dtype=object':'dtype=float',
    }
    X_fast      = vorpy.symbolic.lambdified(X, qp, replacement_d=replacement_d, verbose=True)
    DX_fast     = vorpy.symbolic.lambdified(DX, qp, replacement_d=replacement_d, verbose=True)
    H_fast      = vorpy.symbolic.lambdified(H, qp, replacement_d=replacement_d, verbose=True)
    S_cond_fast = vorpy.symbolic.lambdified(S_cond, J, replacement_d=replacement_d, verbose=True)
    S_fast      = vorpy.symbolic.lambdified(S, J, replacement_d=replacement_d, verbose=True)
    t_initial = 0.0
    t_final = 50.0
    #t_final = 1000.0
    def plot_function (axis, results):
        # Plot the (x, y) projection of the trajectory, with the origin marked.
        axis.set_title('(x(t), y(t))')
        axis.set_aspect(1.0)
        axis.plot(results.y_t[:,0,0], results.y_t[:,0,1])
        axis.plot([0.0], [0.0], '.', color='black')
        # Alternative diagnostic plots, kept for reference:
        #S_cond_t = vorpy.apply_along_axes(S_cond_fast, apply_along_J_t_axes, (results.J_t,))
        #axis.set_title('S_cond')
        #axis.plot(results.t_v, S_cond_t.reshape(len(results.t_v), -1))
        #max_abs_S_cond_v = vorpy.apply_along_axes(lambda x:np.max(np.abs(S_cond_fast(x))), apply_along_J_t_axes, (results.J_t,))
        #overall_max = np.max(max_abs_S_cond_v)
        #axis.set_title(f'max abs S_cond - max over all time: {overall_max}')
        #axis.semilogy(results.t_v, max_abs_S_cond_v)
        #axis.set_title('time step size')
        #axis.semilogy(results.t_v[:-1], results.t_step_v, '.', alpha=0.1)
    def plot_function_2 (axis, results):
        # Plot z as a function of time, with the z == 0 axis marked.
        axis.set_title('(t, z(t))')
        axis.plot(results.t_v, results.y_t[:,0,2])
        axis.axhline(0.0, color='black')
    # Earlier, larger parameter sweeps, kept for reference:
    #H_initial_v = [sp.Rational(n,4) for n in range(0,2+1)]
    ##H_initial_v = [sp.Rational(n,4) for n in range(-2,2+1)]
    #x_initial_v = [float(sp.Rational(n,8) + 1) for n in range(-2,2+1)]
    #assert 1.0 in x_initial_v # We want exactly 1 to be in this.
    #p_x_initial_v = [float(sp.Rational(n,16)) for n in range(-2,2+1)]
    #assert 0.0 in p_x_initial_v # We want exactly 0 to be in this.
    #p_theta_initial_v = np.linspace(0.05, 0.4, 3)
    # Current sweep: zero-energy surface only.
    H_initial_v = [sp.Integer(0)]
    x_initial_v = [1.0]
    assert 1.0 in x_initial_v # We want exactly 1 to be in this.
    p_x_initial_v = [float(sp.Rational(n,16)) for n in range(-3,3+1)]
    assert 0.0 in p_x_initial_v # We want exactly 0 to be in this.
    p_theta_initial_v = np.linspace(0.05, 0.4, 10)
    for H_initial in H_initial_v:
        # For now, we want to pick an initial condition where H == 0, so solve symbolically for p_z.  Just
        # use sheet_index == 0.
        sheet_index = 0
        p_z_solution_v = sp.solve(H - H_initial, p_z)
        print(f'There are {len(p_z_solution_v)} solutions for the equation: {H} = {H_initial}')
        for i,p_z_solution in enumerate(p_z_solution_v):
            print(f' solution {i}: p_z = {p_z_solution}')
        # Take the solution specified by sheet_index
        p_z_solution = p_z_solution_v[sheet_index]
        print(f'using solution {sheet_index}: {p_z_solution}')
        p_z_solution_fast = vorpy.symbolic.lambdified(p_z_solution, qp, replacement_d=replacement_d, verbose=True)
        for x_initial,p_x_initial,p_theta_initial in itertools.product(x_initial_v, p_x_initial_v, p_theta_initial_v):
            # Using the symmetry arguments in KH paper, the initial conditions can be constrained.
            # p_z is left as nan and filled in below from the energy constraint.
            y_initial = np.array([[x_initial, 0.0, 0.0], [p_x_initial, p_theta_initial, np.nan]])
            p_z_initial = p_z_solution_fast(y_initial)
            print(f'p_z_initial = {p_z_initial}')
            y_initial[1,2] = p_z_initial
            print(f'y_initial:\n{y_initial}')
            apply_along_y_t_axes = (1,2)
            apply_along_J_t_axes = (1,2,3,4)
            # One output file per (H, x, p_x, p_theta) combination.
            plot_p = pathlib.Path('kh.06.cartesian') / f'H={float(H_initial)}.x={x_initial}.p_x={p_x_initial}.p_theta={p_theta_initial}.t_final={t_final}.png'
            plot_dynamics(plot_p, t_initial, t_final, [y_initial], X_fast, DX_fast, H_fast, S_fast, apply_along_y_t_axes, apply_along_J_t_axes, plot_function_o=plot_function, plot_function_2_o=plot_function_2, write_pickle=True)
# --- Thermodynamic-force symbols and Helmholtz free energy for an
#     interface damage-plasticity model (Cymbol is a project helper that
#     attaches a code name to a sympy symbol). ---
tau_pi_x = Cymbol(r'\tau^\pi_x', codename='tau_pi_x', real=True)  # tangential back-stress-like force, x
tau_pi_y = Cymbol(r'\tau^\pi_y', codename='tau_pi_y', real=True)  # tangential back-stress-like force, y
X_x = Cymbol('X_x', real=True)   # kinematic-hardening force, x
X_y = Cymbol('X_y', real=True)   # kinematic-hardening force, y
Z = Cymbol('Z', real=True, nonnegative=True)  # isotropic-hardening force (nonnegative)
Y_T = Cymbol('Y_T', real=True)   # tangential damage-driving force
sig = Cymbol(r'\sigma', real=True)
sig_pi = Cymbol(r'\sigma^\pi', codename='sig_pi', real=True)  # normal inelastic stress
Y_N = Cymbol('Y_N', real=True)   # normal damage-driving force
# Vector of generalized stresses, ordered to match the kinematic state vector.
Sig = sp.Matrix([sig_pi, tau_pi_x, tau_pi_y, Z, X_x, X_y, Y_T, Y_N])
# ## Helmholtz free energy
# Tangential part: damaged elastic energy in both sliding directions plus
# isotropic (K_T z^2) and kinematic (gamma_T alpha^2) hardening storage.
rho_psi_T_ = sp.Rational(1, 2) * ((1 - omega_T) * E_T * (s_x - s_pi_x)**2 +
                                  (1 - omega_T) * E_T * (s_y - s_pi_y)**2 +
                                  K_T * z**2 +
                                  gamma_T * alpha_x**2 +
                                  gamma_T * alpha_y**2)
# Normal part: damage acts only under the Heaviside switch H(sig_pi) —
# presumably so compression remains undamaged; confirm the sign convention.
rho_psi_N_ = sp.Rational(1, 2) * (1 - H(sig_pi) * omega_N) * E_N * (w - w_pi)**2
rho_psi_ = rho_psi_T_ + rho_psi_N_
# The introduce the thermodynamic forces we have to differentiate Hemholtz free energy
# with respect to the kinematic state variables
# \begin{align}
# \frac{\partial \rho \psi }{\partial \boldsymbol{\mathcal{E}}}
# \end{align}