Code Example #1
File: test_array_utils.py Project: wenxiang-Li/sympy
def test_contraction_permutation_mix():

    Me = M.subs(k, 3).as_explicit()
    Ne = N.subs(k, 3).as_explicit()

    cg1 = CodegenArrayContraction(
        CodegenArrayPermuteDims(CodegenArrayTensorProduct(M, N),
                                Permutation([0, 2, 1, 3])), (2, 3))
    cg2 = CodegenArrayContraction(CodegenArrayTensorProduct(M, N), (1, 3))
    assert cg1 == cg2
    assert recognize_matrix_expression(cg2) == M * N.T
    cge1 = tensorcontraction(
        permutedims(tensorproduct(Me, Ne), Permutation([0, 2, 1, 3])), (2, 3))
    cge2 = tensorcontraction(tensorproduct(Me, Ne), (1, 3))
    assert cge1 == cge2

    cg1 = CodegenArrayPermuteDims(CodegenArrayTensorProduct(M, N),
                                  Permutation([0, 1, 3, 2]))
    cg2 = CodegenArrayTensorProduct(
        M, CodegenArrayPermuteDims(N, Permutation([1, 0])))
    assert cg1 == cg2
    assert recognize_matrix_expression(cg1) == CodegenArrayTensorProduct(
        M, N.T)
    assert recognize_matrix_expression(cg2) == CodegenArrayTensorProduct(
        M, N.T)

    cg1 = CodegenArrayContraction(
        CodegenArrayPermuteDims(CodegenArrayTensorProduct(M, N, P, Q),
                                Permutation([0, 2, 3, 1, 4, 5, 7, 6])), (1, 2),
        (3, 5))
    cg2 = CodegenArrayContraction(
        CodegenArrayTensorProduct(
            M, N, P, CodegenArrayPermuteDims(Q, Permutation([1, 0]))), (1, 5),
        (2, 3))
    assert cg1 == cg2
    assert recognize_matrix_expression(cg1) == CodegenArrayTensorProduct(
        M * P.T * Trace(N), Q.T)
    assert recognize_matrix_expression(cg2) == CodegenArrayTensorProduct(
        M * P.T * Trace(N), Q.T)

    cg1 = CodegenArrayContraction(
        CodegenArrayPermuteDims(CodegenArrayTensorProduct(M, N, P, Q),
                                Permutation([1, 0, 4, 6, 2, 7, 5, 3])), (0, 1),
        (2, 6), (3, 7))
    cg2 = CodegenArrayPermuteDims(
        CodegenArrayContraction(CodegenArrayTensorProduct(M, P, Q, N), (0, 1),
                                (2, 3), (4, 7)), [1, 0])
    assert cg1 == cg2

    cg1 = CodegenArrayContraction(
        CodegenArrayPermuteDims(CodegenArrayTensorProduct(M, N, P, Q),
                                Permutation([1, 0, 4, 6, 7, 2, 5, 3])), (0, 1),
        (2, 6), (3, 7))
    cg2 = CodegenArrayPermuteDims(
        CodegenArrayContraction(
            CodegenArrayTensorProduct(CodegenArrayPermuteDims(M, [1, 0]), N, P,
                                      Q), (0, 1), (3, 6), (4, 5)),
        Permutation([1, 0]))
    assert cg1 == cg2
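The asserts above recognize the (1, 3) contraction of the tensor product of M and N as M*N.T. As a quick numeric spot check of that identity (the 2x2 matrices below are illustrative, not part of the original test):

from sympy import Matrix, tensorproduct, tensorcontraction

Me = Matrix([[1, 2], [3, 4]])
Ne = Matrix([[5, 6], [7, 8]])
res = tensorcontraction(tensorproduct(Me, Ne), (1, 3))  # res[i, k] = sum_j Me[i, j] * Ne[k, j]
assert res.tomatrix() == Me * Ne.T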
Code Example #2
def compute_Gamma(g_deriv, gUP):
    """Return Christoffel symbols
    """
    g_derivT = Array([(g_deriv[:, :, i]).transpose() for i in range(4)])

    gUgd = tensorproduct(gUP, g_deriv)
    gUgdT = tensorproduct(gUP, g_derivT)

    return 1 / 2 * (tensorcontraction(gUgd, (1, 3)) + tensorcontraction(
        gUgdT, (1, 3)) - tensorcontraction(gUgd, (1, 2)))
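compute_Gamma depends on index conventions fixed elsewhere in its module, so the following is only a self-contained sketch of the same tensorproduct/tensorcontraction pattern for a toy 2-D diagonal metric; the coordinates, metric, and index layout are illustrative assumptions, not taken from the project above.

from sympy import Array, symbols, derive_by_array, permutedims
from sympy import tensorproduct, tensorcontraction

t, r = symbols('t r')
coords = [t, r]
g = Array([[-1, 0], [0, r**2]])          # g_ab
g_inv = Array([[-1, 0], [0, 1 / r**2]])  # g^ab
dg = derive_by_array(g, coords)          # dg[c, a, b] = d g_ab / d x^c

# Gamma^a_bc = 1/2 * g^ad * (g_dc,b + g_db,c - g_bc,d)
gU_dg = tensorproduct(g_inv, dg)              # axes (a, d, c, e, f)
term1 = tensorcontraction(gU_dg, (1, 3))      # g^ad g_dc,b  -> axes (a, b, c)
term3 = tensorcontraction(gU_dg, (1, 2))      # g^ad g_bc,d  -> axes (a, b, c)
Gamma = (term1 + permutedims(term1, (0, 2, 1)) - term3) / 2

assert Gamma[1, 1, 1] == 1 / r               # Gamma^r_rr for g_rr = r**2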
Code Example #3
def test_arrayexpr_contraction_permutation_mix():

    Me = M.subs(k, 3).as_explicit()
    Ne = N.subs(k, 3).as_explicit()

    cg1 = ArrayContraction(PermuteDims(ArrayTensorProduct(M, N), Permutation([0, 2, 1, 3])), (2, 3))
    cg2 = ArrayContraction(ArrayTensorProduct(M, N), (1, 3))
    assert cg1 == cg2
    cge1 = tensorcontraction(permutedims(tensorproduct(Me, Ne), Permutation([0, 2, 1, 3])), (2, 3))
    cge2 = tensorcontraction(tensorproduct(Me, Ne), (1, 3))
    assert cge1 == cge2

    cg1 = PermuteDims(ArrayTensorProduct(M, N), Permutation([0, 1, 3, 2]))
    cg2 = ArrayTensorProduct(M, PermuteDims(N, Permutation([1, 0])))
    assert cg1 == cg2

    cg1 = ArrayContraction(
        PermuteDims(
            ArrayTensorProduct(M, N, P, Q), Permutation([0, 2, 3, 1, 4, 5, 7, 6])),
        (1, 2), (3, 5)
    )
    cg2 = ArrayContraction(
        ArrayTensorProduct(M, N, P, PermuteDims(Q, Permutation([1, 0]))),
        (1, 5), (2, 3)
    )
    assert cg1 == cg2

    cg1 = ArrayContraction(
        PermuteDims(
            ArrayTensorProduct(M, N, P, Q), Permutation([1, 0, 4, 6, 2, 7, 5, 3])),
        (0, 1), (2, 6), (3, 7)
    )
    cg2 = PermuteDims(
        ArrayContraction(
            ArrayTensorProduct(M, P, Q, N),
            (0, 1), (2, 3), (4, 7)),
        [1, 0]
    )
    assert cg1 == cg2

    cg1 = ArrayContraction(
        PermuteDims(
            ArrayTensorProduct(M, N, P, Q), Permutation([1, 0, 4, 6, 7, 2, 5, 3])),
        (0, 1), (2, 6), (3, 7)
    )
    cg2 = PermuteDims(
        ArrayContraction(
            ArrayTensorProduct(PermuteDims(M, [1, 0]), N, P, Q),
            (0, 1), (3, 6), (4, 5)
        ),
        Permutation([1, 0])
    )
    assert cg1 == cg2
Code Example #4
 def init_riemann(self):
     """ Riemann tensor of the metric, which is a 4-index tensor. """
     riemann = sp.MutableDenseNDimArray(np.zeros((self.dim,)*4)) # Initializing 4-index tensor
     dchr = sp.MutableDenseNDimArray(np.zeros((self.dim,)*4)) # Derivative of Christoffel symbols
     if self.chr is None:
         self.init_chr() # Initialize Christoffel symbols (if not already done)
     for mu in range(self.dim):
         dchr[:,:,:,mu] = sp.diff(self.chr, self.variables[mu])
     for sigma in range(self.dim):
         for rho in range(self.dim):
             riemann[rho,sigma,:,:] = dchr[rho,:,sigma,:].transpose() - dchr[rho,:,sigma,:] \
                     + sp.tensorcontraction(sp.tensorproduct(self.chr[rho,:,:], self.chr[:,:,sigma]),(1,2)) \
                     - (sp.tensorcontraction(sp.tensorproduct(self.chr[rho,:,:], self.chr[:,:,sigma]),(1,2))).transpose()
     self.riemann = sp.simplify(riemann)
Code Example #5
File: ricci.py Project: ycs0405/einsteinpy
    def from_riemann(cls, riemann, parent_metric=None):
        """
        Get Ricci Tensor calculated from Riemann Tensor

        Parameters
        ----------
        riemann : ~einsteinpy.symbolic.riemann.RiemannCurvatureTensor
            Riemann Tensor
        parent_metric : ~einsteinpy.symbolic.metric.MetricTensor or None
            Corresponding Metric for the Ricci Tensor.
            None if it should inherit the Parent Metric of Riemann Tensor.
            Defaults to None.

        """
        if not riemann.config == "ulll":
            riemann = riemann.change_config(newconfig="ulll",
                                            metric=parent_metric)
        if parent_metric is None:
            parent_metric = riemann.parent_metric
        return cls(
            simplify_sympy_array(
                sympy.tensorcontraction(riemann.tensor(), (0, 2))),
            riemann.syms,
            config="ll",
            parent_metric=parent_metric,
        )
Code Example #6
def test_EinsteinTensor_trace_negetive_of_Ricci_Scalar_in_4D(metric):
    # https://en.wikipedia.org/wiki/Einstein_tensor#Trace
    G1 = EinsteinTensor.from_metric(metric)
    G2 = G1.change_config("ul")
    val1 = simplify(tensorcontraction(G2.tensor(), (0, 1)))
    val2 = RicciScalar.from_metric(metric).expr
    assert simplify(val1 + val2) == 0
Code Example #7
File: ricci.py Project: ycs0405/einsteinpy
    def from_riccitensor(cls, riccitensor, parent_metric=None):
        """
        Get Ricci Scalar calculated from Ricci Tensor

        Parameters
        ----------
        riccitensor : ~einsteinpy.symbolic.ricci.RicciTensor
            Ricci Tensor
        parent_metric : ~einsteinpy.symbolic.metric.MetricTensor or None
            Corresponding Metric for the Ricci Scalar.
            Defaults to None.

        """

        if not riccitensor.config == "ul":
            riccitensor = riccitensor.change_config(newconfig="ul",
                                                    metric=parent_metric)
        if parent_metric is None:
            parent_metric = riccitensor.parent_metric
        ricci_scalar = tensorcontraction(riccitensor.tensor(), (0, 1))
        return cls(
            simplify_sympy_array(ricci_scalar),
            riccitensor.syms,
            parent_metric=parent_metric,
        )
Code Example #8
def partial_trace(dm, n_qubits, subsystem):
    # This is the same calc. as for the tf_qc/qc.trace
    # First we find the last consecutive block to trace away
    block_end = subsystem[-1]
    block_start = block_end
    for idx in reversed(subsystem):
        if block_start - idx <= 1:
            block_start = idx
        else:
            break
    n_static1 = block_start  # First part of static qubits
    n_static2 = (n_qubits - 1) - block_end  # Second part of static qubits
    n_static = n_static1 + n_static2
    n_qubits2trace = block_end - block_start + 1  # Qubits to trace away

    new_shape = [
        2**n_static1, 2**n_qubits2trace, 2**n_static2, 2**n_static1,
        2**n_qubits2trace, 2**n_static2
    ]

    flat_dm = sp.flatten(dm)
    new_dm = sp.NDimArray(flat_dm, shape=new_shape)
    trace_result = tensorcontraction(new_dm, (1, 4))
    reshaped_result = trace_result.reshape(2**n_static, 2**n_static)
    # We must now recursively do the same to the rest of the subsystems
    idx_of_start = subsystem.index(block_start)
    new_subsystem = subsystem[:idx_of_start]
    return partial_trace(reshaped_result, n_static, new_subsystem) if len(
        new_subsystem) > 0 else reshaped_result.tomatrix()
Code Example #9
File: entropy_hughes_V.py Project: qiqi/adFVM
def tensordot(a, b):
    s1 = a.shape
    s2 = b.shape
    assert s1[-1] == s2[0]
    s3 = s1[:-1] + s2[1:]
    k = len(s1) - 1
    return tensorcontraction(tensorproduct(a, b), (k, k + 1))
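A possible spot check for tensordot (it assumes tensorproduct and tensorcontraction are imported from sympy, as the function itself requires; the values are illustrative): for two 2-D arrays the (k, k + 1) contraction reduces to ordinary matrix multiplication.

from sympy import Array, Matrix

a = Array([[1, 2], [3, 4]])
b = Array([[5, 6], [7, 8]])
assert tensordot(a, b).tomatrix() == Matrix([[19, 22], [43, 50]])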
Code Example #10
def test_array_as_explicit_call():

    assert ZeroArray(3, 2, 4).as_explicit() == ImmutableDenseNDimArray.zeros(
        3, 2, 4)
    assert OneArray(3, 2, 4).as_explicit() == ImmutableDenseNDimArray(
        [1 for i in range(3 * 2 * 4)]).reshape(3, 2, 4)

    k = Symbol("k")
    X = ArraySymbol("X", k, 3, 2)
    raises(ValueError, lambda: X.as_explicit())
    raises(ValueError, lambda: ZeroArray(k, 2, 3).as_explicit())
    raises(ValueError, lambda: OneArray(2, k, 2).as_explicit())

    A = ArraySymbol("A", 3, 3)
    B = ArraySymbol("B", 3, 3)

    texpr = tensorproduct(A, B)
    assert isinstance(texpr, ArrayTensorProduct)
    assert texpr.as_explicit() == tensorproduct(A.as_explicit(),
                                                B.as_explicit())

    texpr = tensorcontraction(A, (0, 1))
    assert isinstance(texpr, ArrayContraction)
    assert texpr.as_explicit() == A[0, 0] + A[1, 1] + A[2, 2]

    texpr = tensordiagonal(A, (0, 1))
    assert isinstance(texpr, ArrayDiagonal)
    assert texpr.as_explicit() == ImmutableDenseNDimArray(
        [A[0, 0], A[1, 1], A[2, 2]])

    texpr = permutedims(A, [1, 0])
    assert isinstance(texpr, PermuteDims)
    assert texpr.as_explicit() == permutedims(A.as_explicit(), [1, 0])
Code Example #11
def geodesic_ncurve(surface: ParametricSurface, ic_uv, ic_uv_t, t1=5, dt=0.05):
    from sympy import lambdify
    from scipy.integrate import ode as sciode
    import numpy as np
    from sympy import symbols, Function, Array, tensorproduct, tensorcontraction
    t = symbols('t', real=True)
    u = Function(surface.sym(0), real=True)(t)
    v = Function(surface.sym(1), real=True)(t)

    second_term_tensor = tensorproduct(
        surface.christoffel_symbol.tensor().subs(
            {surface.sym(0):u, surface.sym(1):v}),
        Array([u, v]).diff(t), 
        Array([u, v]).diff(t))
    second_term_tensor = tensorcontraction(second_term_tensor, (1, 3), (2, 4))

    u_t = Function(str(u) + r'^{\prime}', real=True)(t)
    v_t = Function(str(v) + r'^{\prime}', real=True)(t)
    lambdify_sympy = lambdify((u, u_t, v, v_t), [
        u_t, 
        -second_term_tensor[0].subs({u.diff(t):u_t, v.diff(t):v_t}), 
        v_t,
        -second_term_tensor[1].subs({u.diff(t):u_t, v.diff(t):v_t})])
    
    x0, t0 = [ic_uv[0], ic_uv_t[0], ic_uv[1], ic_uv_t[1]], 0.0
    scioder = sciode(lambda t,X: lambdify_sympy(*X)).set_integrator('vode', method='bdf')
    scioder.set_initial_value(x0, t0)
    num_of_t = int(t1 / dt)
    u_arr = np.empty((num_of_t, 4))
    u_arr[0] = x0
    t_arr = np.arange(num_of_t) * dt
    i = 0
    while scioder.successful() and i < num_of_t-1:
        i += 1
        u_arr[i] = scioder.integrate(scioder.t+dt)
    return t_arr, (u_arr[:, 0], u_arr[:, 2])
Code Example #12
def test_weyl_contraction_1st_3rd_indices_zero():
    mw1 = anti_de_sitter_metric()
    w1 = WeylTensor.from_metric(mw1)
    t1 = simplify_sympy_array(
        sympy.tensorcontraction(w1.change_config("ulll").tensor(), (0, 2)))
    t0 = sympy.Array(np.zeros(shape=t1.shape, dtype=int))
    assert t1 == t0
Code Example #13
def tensor_contractions(T, indeces):
    """If indeces is a tuple with two integers, then perform the contraction
    between those two indeces. If indeces is a list of tuples, then apply
    multiple contractions.
    """
    if (type(indeces) is tuple):
        ret = tensorcontraction(T, indeces)
    elif (type(indeces) is list):
        T = tensorcontraction(T, indeces[0])
        for i in range(1, len(indeces)):
            T = tensorcontraction(
                T, find_indeces_for_contraction(indeces[i - 1], indeces[i]))
        ret = T
    else:
        raise TypeError("indeces in tensor_contractions has to be a list of \
tuples or a tuple")

    return ret
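A possible tuple-branch usage (it assumes tensor_contractions and its sympy imports are in scope; the array is illustrative): contracting indices (0, 1) of a rank-2 array yields its trace.

from sympy import Array

T2 = Array([[1, 2], [3, 4]])
assert tensor_contractions(T2, (0, 1)) == 5  # 1 + 4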
Code Example #14
def compute_Upsilon(g_deriv, uUP):
    """Return Upsilon =

    Upsilon_ alpha beta = g_beta gamma, alpha uUP^gamma

    """

    gdU = tensorproduct(g_deriv, uUP)

    return -Matrix(tensorcontraction(gdU, (1, 3)))
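A small numeric check of the contraction pattern used above (the arrays are illustrative): contracting axes (1, 3) of tensorproduct(g_deriv, uUP) computes result[i, k] = sum_j g_deriv[i, j, k] * uUP[j].

from sympy import Array, tensorproduct, tensorcontraction

g_deriv = Array([[[1, 2], [3, 4]],
                 [[5, 6], [7, 8]]])  # shape (2, 2, 2)
uUP = Array([1, 10])                 # shape (2,)
res = tensorcontraction(tensorproduct(g_deriv, uUP), (1, 3))
assert res == Array([[31, 42], [75, 86]])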
Code Example #15
def LineElement():
    print("Starting Test: Line Element...")
    try:
        t, x, y, z = sympy.symbols("t x y z")
        dt, dx, dy, dz = sympy.symbols('dt dx dy dz')
        eta = bt.GRMetric([t, x, y, z],
                          sympy.Matrix([[-1, 0, 0, 0], [0, 1, 0, 0],
                                        [0, 0, 1, 0], [0, 0, 0, 1]]))
        # Name the tensor dx_vec so that the symbol dx defined above is not
        # shadowed before it is used in the assertion below.
        dx_vec = bt.GRTensor([eta], sympy.Array([dt, dx, dy, dz]))
        rhs_ = sympy.tensorcontraction(
            sympy.tensorproduct(eta.lowered, dx_vec.vals), (1, 2))
        rhs = sympy.tensorcontraction(sympy.tensorproduct(rhs_, dx_vec.vals),
                                      (0, 1))
        assert rhs + dt**2 - dx**2 - dy**2 - dz**2 == 0
        print("Test: Line Element - Passed")
        return 1
    except:
        print(rhs)
        print(type(rhs))
        print("Test: Line Element - Failed")
        return 0
Code Example #16
File: tensor.py Project: vineetg3/einsteinpy
    def lorentz_transform(self, transformation_matrix):
        """
        Performs a Lorentz transform on the tensor.

        Parameters
        ----------
            transformation_matrix : ~sympy.tensor.array.dense_ndim_array.ImmutableDenseNDimArray or list
                Sympy Array or multi-dimensional list containing Sympy Expressions

        Returns
        -------
            ~einsteinpy.symbolic.tensor.BaseRelativityTensor
                lorentz transformed tensor(or vector)

        """
        tm = sympy.Array(transformation_matrix)
        t = self.tensor()
        for i in range(self.order):
            if self.config[i] == "u":
                t = simplify(
                    tensorcontraction(tensorproduct(tm, t), (1, 2 + i)))
            else:
                t = simplify(
                    tensorcontraction(tensorproduct(tm, t), (0, 2 + i)))
            tmp = np.array(t.tolist()).reshape(t.shape)
            source, dest = list(range(len(t.shape))), list(range(len(t.shape)))
            dest.pop(i)
            dest.insert(0, i)
            tmp = np.moveaxis(tmp, source, dest)
            t = sympy.Array(tmp)

        return BaseRelativityTensor(
            t,
            syms=self.syms,
            config=self.config,
            parent_metric=None,
            variables=self.variables,
            functions=self.functions,
        )
Code Example #17
def CovDeriv():
    print("Starting Test: Covariant Derivative...")
    x, y = sympy.symbols('x y')
    test_metric = sympy.Matrix([[1, 0], [0, x**2]])
    g = bt.GRMetric([x, y], metric=test_metric)
    a, b = sympy.symbols('a b')
    A = bt.GRTensor([a],
                    sympy.Array(
                        [x * sympy.cos(2 * y), -x * x * sympy.sin(2 * y)]))
    A.raise_index(a, g)
    Aab = bt.CovariantDerivative(b, [x, y], g, A)
    # print(Aab.vals)
    assert sympy.tensorcontraction(Aab.vals, (0, 1)) == 0
    return 1
Code Example #18
def tensor3_vector_product(T, v):
    """Implements a product of a rank-3 tensor (3D array) with a vector using
    tensor product and tensor contraction.

    Parameters
    ----------

    T: sp.Array of dimensions n x m x k

    v: sp.Array of dimensions k x 1

    Returns
    -------

    A: sp.Array of dimensions n x m

    Example
    -------

    >>> T = sp.Array([[[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]],
    ...               [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]])
    ⎡⎡1  4  7  10⎤  ⎡13  16  19  22⎤⎤
    ⎢⎢           ⎥  ⎢              ⎥⎥
    ⎢⎢2  5  8  11⎥  ⎢14  17  20  23⎥⎥
    ⎢⎢           ⎥  ⎢              ⎥⎥
    ⎣⎣3  6  9  12⎦  ⎣15  18  21  24⎦⎦
    >>> v = sp.Array([1, 2, 3, 4]).reshape(4, 1)
    ⎡1⎤
    ⎢ ⎥
    ⎢2⎥
    ⎢ ⎥
    ⎢3⎥
    ⎢ ⎥
    ⎣4⎦
    >>> tensor3_vector_product(T, v)
    ⎡70   80   90 ⎤
    ⎢             ⎥
    ⎣190  200  210⎦

    """
    import sympy as sp
    assert T.rank() == 3
    # reshape v into a 1-D vector so that the contracted result does not keep
    # a trailing "x 1" dimension (note: reshape returns a new array)
    v = v.reshape(v.shape[0])
    p = sp.tensorproduct(T, v)
    return sp.tensorcontraction(p, (2, 3))
Code Example #19
File: tensor.py Project: troywei123/einsteinpy
def tensor_product(tensor1, tensor2, i=None, j=None):
    """Tensor Product of ``tensor1`` and ``tensor2``

    Parameters
    ----------
    tensor1 : ~einsteinpy.symbolic.BaseRelativityTensor
    tensor2 : ~einsteinpy.symbolic.BaseRelativityTensor
    i : int, optional
        contract ``i``th index of ``tensor1``
    j : int, optional
        contract ``j``th index of ``tensor2``


    Returns
    -------
    ~einsteinpy.symbolic.BaseRelativityTensor
        tensor of appropriate rank

    Raises
    ------
    ValueError
        Raised when ``i`` and ``j`` both indicate 'u' or 'l' indices
    """
    product = tensorproduct(tensor1.arr, tensor2.arr)

    if i is None or j is None:  # contract only when both i and j are given
        newconfig = tensor1.config + tensor2.config
    else:
        if tensor1.config[i] == tensor2.config[j]:
            raise ValueError(
                "Index summation not allowed between %s and %s indices"
                % (tensor1.config[i], tensor2.config[j])
            )

        product = simplify(tensorcontraction(product, (i, len(tensor1.config) + j)))

        con = tensor1.config[:i] + tensor1.config[i + 1 :]
        fig = tensor2.config[:j] + tensor2.config[j + 1 :]
        newconfig = con + fig

    return BaseRelativityTensor(
        product,
        syms=tensor1.syms,
        config=newconfig,
        parent_metric=tensor1.parent_metric,
        variables=tensor1.variables,
        functions=tensor1.functions,
    )
Code Example #20
File: tensor.py Project: yf-hang/einsteinpy
 def chain_config_change():
     t = sympy.Array(tensor.tensor())
     difflist = _difference_list(newconfig, tensor.config)
     for i, action in enumerate(difflist):
         if action == 0:
             continue
         else:
             t = simplify(
                 tensorcontraction(tensorproduct(met_dict[action], t),
                                   (1, 2 + i)))
             # reshuffle the indices
             dest = list(range(len(t.shape)))
             dest.remove(0)
             dest.insert(i, 0)
             t = sympy.permutedims(t, dest)
     return t
Code Example #21
def test_covdiff():
    """
    Test the covariant derivative of a tensor on the 2-sphere.
    The contraction of the specified tensor after being raised should be 0,
    because it is a divergenceless tensor.
    """
    x, y = sympy.symbols('x y')
    test_metric = sympy.Matrix([[1, 0], [0, x**2]])
    g = bt.GRMetric([x, y], metric=test_metric)
    a, b = sympy.symbols('a b')
    A = bt.GRTensor([a],
                    sympy.Array(
                        [x * sympy.cos(2 * y), -x * x * sympy.sin(2 * y)]))
    A.raise_index(a, g)
    Aab = bt.CovariantDerivative(b, [x, y], g, A)
    assert sympy.tensorcontraction(Aab.vals, (0, 1)) == 0
    return 1
Code Example #22
File: tensor.py Project: vineetg3/einsteinpy
 def chain_config_change():
     t = sympy.Array(tensor.tensor())
     difflist = _difference_list(newconfig, tensor.config)
     for i, action in enumerate(difflist):
         if action == 0:
             continue
         else:
             t = simplify(
                 tensorcontraction(tensorproduct(met_dict[action], t),
                                   (1, 2 + i)))
             # reshuffle the indices
             tmp = np.array(t.tolist()).reshape(t.shape)
             source, dest = list(range(len(t.shape))), list(
                 range(len(t.shape)))
             dest.pop(i)
             dest.insert(0, i)
             tmp = np.moveaxis(tmp, source, dest)
             t = sympy.Array(tmp)
     return t
Code Example #23
def test_array_as_explicit_matrix_symbol():

    A = MatrixSymbol("A", 3, 3)
    B = MatrixSymbol("B", 3, 3)

    texpr = tensorproduct(A, B)
    assert isinstance(texpr, ArrayTensorProduct)
    assert texpr.as_explicit() == tensorproduct(A.as_explicit(),
                                                B.as_explicit())

    texpr = tensorcontraction(A, (0, 1))
    assert isinstance(texpr, ArrayContraction)
    assert texpr.as_explicit() == A[0, 0] + A[1, 1] + A[2, 2]

    texpr = tensordiagonal(A, (0, 1))
    assert isinstance(texpr, ArrayDiagonal)
    assert texpr.as_explicit() == ImmutableDenseNDimArray(
        [A[0, 0], A[1, 1], A[2, 2]])

    texpr = permutedims(A, [1, 0])
    assert isinstance(texpr, PermuteDims)
    assert texpr.as_explicit() == permutedims(A.as_explicit(), [1, 0])
Code Example #24
def test_array_symbol_and_element():
    A = ArraySymbol("A", 2)
    A0 = ArrayElement(A, (0,))
    A1 = ArrayElement(A, (1,))
    assert A.as_explicit() == ImmutableDenseNDimArray([A0, A1])

    A2 = tensorproduct(A, A)
    assert A2.shape == (2, 2)
    # TODO: not yet supported:
    # assert A2.as_explicit() == Array([[A[0]*A[0], A[1]*A[0]], [A[0]*A[1], A[1]*A[1]]])
    A3 = tensorcontraction(A2, (0, 1))
    assert A3.shape == ()
    # TODO: not yet supported:
    # assert A3.as_explicit() == Array([])

    A = ArraySymbol("A", 2, 3, 4)
    Ae = A.as_explicit()
    assert Ae == ImmutableDenseNDimArray(
        [[[ArrayElement(A, (i, j, k)) for k in range(4)] for j in range(3)] for i in range(2)])

    p = permutedims(A, Permutation(0, 2, 1))
    assert isinstance(p, PermuteDims)
Code Example #25
def tensor_contract(T, ind1, ind2):
    """Contract two indices of a tensor.
    
    Arguments:
    T (Tensor) -- Tensor to contract
    ind1 (Symbol) -- First index to contract.
    ind2 (Symbol) -- Second index to contract.
    
    Return Type -- Tensor
    """
    if ind1 * ind2 > 0:
        raise AttributeError("Index 1 and 2 cannot be in the same state")

    if ind1 not in T.indices or ind2 not in T.indices:
        raise AttributeError(f"Index {ind1} or {ind2} not found in tensor T")

    i1 = T.indices.index(ind1)
    i2 = T.indices.index(ind2)

    # Copy the index list so that T's own indices are not mutated below.
    new_indices = list(T.indices)
    new_indices.remove(ind1)
    new_indices.remove(ind2)
    new_vals = sympy.tensorcontraction(T.vals, (i1, i2))
    return objects.Tensor(new_indices, new_vals)
Code Example #26
            exec('W.append(w' + str(FromLayer) + str(i) + str(j) + ')')
    W = sp.Array(W)
    W = W.reshape(outputNeurons, inputNeurons)
    return (W)

#_weights_between_layers_1_and_2____
W_l1tol2 = weights(1, 4, 4)
#_weights_between_layers_2_and_3____
W_l2tol3 = weights(2, 4, 4)
#_weights_between_layers_3_and_4____
W_l3tol4 = weights(3, 4, 2)
#_weights_between_layers_4_and_5____
W_l4tol5 = weights(4, 2, 1)
#______________________________________________________________________________

#_function_definition_and_input______________
f = lambda x: 1 / (1 + sp.exp(-x))
input = sp.Array([inp1, inp2, inp3, inp4])
#_____________________________________________

#_Outputs_____________________________________________________________________
o_l1 = [f(input[0]), f(input[1]), f(input[2]), f(input[3])]
o_l2 = sp.tensorcontraction(
    sp.tensorproduct([o_l1], W_l1tol2)[0, :, :, :].applyfunc(f), (0, 1))
o_l3 = sp.tensorcontraction(
    sp.tensorproduct([o_l2], W_l2tol3)[0, :, :, :].applyfunc(f), (0, 1))
o_l4 = sp.tensorcontraction(
    sp.tensorproduct([o_l3], W_l3tol4)[0, :, :, :].applyfunc(f), (0, 1))
o_l5 = sp.tensorcontraction(
    sp.tensorproduct([o_l4], W_l4tol5)[0, :, :, :].applyfunc(f), (0, 1))
#______________________________________________________________________________
Code Example #27
def row_wise_div(tensor):
    return sym.Matrix(
        tensorcontraction(derive_by_array(tensor, [x, y]), (0, 2)))
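A short self-contained check of the same pattern (the symbols and test tensor are illustrative): contracting axes (0, 2) of derive_by_array(T, [x, y]) sums d T[i, j] / d x_j over j, i.e. the divergence of each row.

from sympy import Array, symbols, derive_by_array, tensorcontraction

x, y = symbols('x y')
T = Array([[x**2, x*y],
           [y**2, x + y]])
div = tensorcontraction(derive_by_array(T, [x, y]), (0, 2))
assert div == Array([3*x, 1])  # [2*x + x, 0 + 1]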
Code Example #28
File: array_expressions.py Project: dagidagi1/Matrix
 def as_explicit(self):
     return tensorcontraction(self.expr.as_explicit(), *self.contraction_indices)
Code Example #29
W_l2tol3 = weights(2, 4, 4)
#_weights_between_layers_3_and_4____
W_l3tol4 = weights(3, 4, 2)
#_weights_between_layers_4_and_5____
W_l4tol5 = weights(4, 2, 1)
#______________________________________________________________________________

#_function_definition_and_input______________
f = lambda x: 1 / (1 + sp.exp(-x))
input = sp.Array([inp1, inp2, inp3, inp4])
#_____________________________________________

#_Outputs_____________________________________________________________________
o_l1 = sp.Array([[f(input[0]), f(input[1]), f(input[2]), f(input[3])]])
o_l2 = sp.Array([
    sp.tensorcontraction(
        sp.tensorproduct(o_l1, W_l1tol2)[0, :, :, :].applyfunc(f), (0, 1))
])
o_l3 = sp.Array([
    sp.tensorcontraction(
        sp.tensorproduct(o_l2, W_l2tol3)[0, :, :, :].applyfunc(f), (0, 1))
])
o_l4 = sp.Array([
    sp.tensorcontraction(
        sp.tensorproduct(o_l3, W_l3tol4)[0, :, :, :].applyfunc(f), (0, 1))
])
o_l5 = sp.Array([
    sp.tensorcontraction(
        sp.tensorproduct(o_l4, W_l4tol5)[0, :, :, :].applyfunc(f), (0, 1))
])
#______________________________________________________________________________
Code Example #30
File: ST-0003.py Project: ailever/openapi
def SARIMACorrelation(trendparams: tuple = (0, 0, 0),
                      seasonalparams: tuple = (0, 0, 0, 1),
                      trendAR=None,
                      trendMA=None,
                      seasonAR=None,
                      seasonMA=None):
    p, d, q = trendparams
    P, D, Q, m = seasonalparams
    print(f'SARIMA({p},{d},{q})({P},{D},{Q},{m})')

    assert type(p) is int, 'Input parameter "p" is not an integer type.'
    assert type(d) is int, 'Input parameter "d" is not an integer type.'
    assert type(q) is int, 'Input parameter "q" is not an integer type.'
    assert type(P) is int, 'Input parameter "P" is not an integer type.'
    assert type(D) is int, 'Input parameter "D" is not an integer type.'
    assert type(Q) is int, 'Input parameter "Q" is not an integer type.'
    assert type(m) is int, 'Input parameter "m" is not an integer type.'

    if trendAR:
        assert len(
            trendAR
        ) == p, f'The len(trendAR) must be {p}. Reset the parameters.'
    else:
        trendAR = [0] * p
    if trendMA:
        assert len(
            trendMA
        ) == q, f'The len(trendMA) must be {q}. Reset the parameters.'
    else:
        trendMA = [0] * q
    if seasonAR:
        assert len(
            seasonAR
        ) == P, f'The len(seasonAR) must be {P}. Reset the parameters.'
    else:
        seasonAR = [0] * P
    if seasonMA:
        assert len(
            seasonMA
        ) == Q, f'The len(seasonMA) must be {Q}. Reset the parameters.'
    else:
        seasonMA = [0] * Q

    Y_order = p + P * m + d + D * m
    e_order = q + Q * m

    # define Y, e
    Y, e = sympy.symbols('Y_t, e_t')
    I, J = sympy.symbols('i, j')
    Y_ = {}
    e_ = {}
    Y_['t'] = Y
    Y__ = [[Y_['t']]]
    e_['t'] = e
    e__ = [[e_['t']]]
    for i in range(1, Y_order + 1):
        Y_[f't-{i}'] = sympy.symbols(f'Y_t-{i}')
        Y__.append([
            Y_[f't-{i}'] * (I**i)
        ])  # Y__ = [ [Y_['t']], [Y_['t-1']], ..., [Y_['t-(p+P*m+q+Q*m)']] ]
    for i in range(1, e_order + 1):
        e_[f't-{i}'] = sympy.symbols(f'e_t-{i}')
        e__.append([
            e_[f't-{i}'] * (J**i)
        ])  # e__ = [ [e_['t']], [e_['t-1']], ..., [e_['t-(q+Q*m)']] ]

    # define L
    L = sympy.symbols('L')
    S_Lag = L**m
    T_Lag = L
    S_Lag_Diff = (1 - L**m)**D
    T_Lag_Diff = (1 - L)**d

    # define coefficients : phis(T), Phis(S), thetas(T), Thetas(S)
    T_phi = {}
    T_phis = []
    L_byT_phi = []
    S_phi = {}
    S_phis = []
    L_byS_phi = []
    T_theta = {}
    T_thetas = []
    L_byT_theta = []
    S_theta = {}
    S_thetas = []
    L_byS_theta = []

    for p_ in range(0, p + 1):
        T_phi[p_] = sympy.symbols(f'phi_{p_}')
        T_phis.append(
            -T_phi[p_])  # T_phis      = [T_phi[0], T_phi[1], ..., T_phi[p]]
        L_byT_phi.append([T_Lag**p_
                          ])  # L_byT_phi   = [[L**0], [L**1], ..., [L**p]]
    for P_ in range(0, P + 1):
        S_phi[P_] = sympy.symbols(f'Phi_{P_}')
        S_phis.append(
            -S_phi[P_])  # S_phis      = [S_phi[0], S_phi[1], ..., S_phi[P]]
        L_byS_phi.append([
            S_Lag**P_
        ])  # L_byS_phi   = [[(L**m)**0], [(L**m)**1], ..., [(L**m)**P]]
    for q_ in range(0, q + 1):
        T_theta[q_] = sympy.symbols(f'theta_{q_}')
        T_thetas.append(
            T_theta[q_]
        )  # T_thetas    = [T_theta[0], T_theta[1], ..., T_theta[q]]
        L_byT_theta.append([T_Lag**q_
                            ])  # L_byT_theta = [[L**0], [L**1], ..., [L**q]]
    for Q_ in range(0, Q + 1):
        S_theta[Q_] = sympy.symbols(f'Theta_{Q_}')
        S_thetas.append(
            S_theta[Q_]
        )  # S_thetas    = [T_theta[0], T_theta[1], ..., T_theta[Q]]
        L_byS_theta.append([
            S_Lag**Q_
        ])  # L_byS_theta = [[(L**m)**0], [(L**m)**1], ..., [(L**m)**Q]]

    T_phi_Lag = sympy.Matrix([T_phis]) * sympy.Matrix(L_byT_phi)
    S_phi_Lag = sympy.Matrix([S_phis]) * sympy.Matrix(L_byS_phi)
    T_theta_Lag = sympy.Matrix([T_thetas]) * sympy.Matrix(L_byT_theta)
    S_theta_Lag = sympy.Matrix([S_thetas]) * sympy.Matrix(L_byS_theta)

    Y_operator = (T_phi_Lag * S_phi_Lag * T_Lag_Diff * S_Lag_Diff).subs(
        T_phi[0], -1).subs(S_phi[0], -1)[0]
    e_operator = (T_theta_Lag * S_theta_Lag).subs(T_theta[0],
                                                  1).subs(S_theta[0], 1)[0]

    Y_operation = sympy.collect(Y_operator.expand(), L)
    e_operation = sympy.collect(e_operator.expand(), L)

    Y_coeff = sympy.Poly(Y_operation, L).all_coeffs()[::-1]
    e_coeff = sympy.Poly(e_operation, L).all_coeffs()[::-1]

    Y_term = sympy.Matrix([Y_coeff]) * sympy.Matrix(Y__)  # left-side
    e_term = sympy.Matrix([e_coeff]) * sympy.Matrix(e__)  # right-side

    Time_Series = {}
    Time_Series['Y_t(i,j)'] = sympy.Poly(Y - Y_term[0] + e_term[0], (I, J))
    Time_Series['Y_t'] = Time_Series['Y_t(i,j)'].subs(I, 1).subs(J, 1)
    for i in range(1, int(p + P * m + d + D * m) + 1):
        Time_Series['Y_t'] = sympy.collect(Time_Series['Y_t'],
                                           Y_[f't-{i}']).simplify()
    for i in range(1, int(q + Q * m) + 1):
        Time_Series['Y_t'] = sympy.collect(Time_Series['Y_t'],
                                           e_[f't-{i}']).simplify()
    print('* Time Series Equation(Analytic Form)')
    sympy.pprint(Time_Series['Y_t'])
    print()

    Time_Series['Analytic_Coeff_of_Y'] = Time_Series['Y_t(i,j)'].subs(
        J, 0).all_coeffs()[::-1]
    Time_Series['Analytic_Coeff_of_e'] = Time_Series['Y_t(i,j)'].subs(
        I, 0).all_coeffs()[::-1]

    Time_Series['Numeric_Coeff_of_Y'] = Time_Series['Y_t(i,j)'].subs(
        J, 0) - e_['t']
    Time_Series['Numeric_Coeff_of_e'] = Time_Series['Y_t(i,j)'].subs(I, 0)
    for i, (phi, Np) in enumerate(zip(list(T_phi.values())[1:], trendAR)):
        Time_Series['Numeric_Coeff_of_Y'] = Time_Series[
            'Numeric_Coeff_of_Y'].subs(phi, Np)
    for i, (Phi, NP) in enumerate(zip(list(S_phi.values())[1:], seasonAR)):
        Time_Series['Numeric_Coeff_of_Y'] = Time_Series[
            'Numeric_Coeff_of_Y'].subs(Phi, NP)
    for i, (theta, Nt) in enumerate(zip(list(T_theta.values())[1:], trendMA)):
        Time_Series['Numeric_Coeff_of_e'] = Time_Series[
            'Numeric_Coeff_of_e'].subs(theta, Nt)
    for i, (Theta, NT) in enumerate(zip(list(S_theta.values())[1:], seasonMA)):
        Time_Series['Numeric_Coeff_of_e'] = Time_Series[
            'Numeric_Coeff_of_e'].subs(Theta, NT)
    print('* Time Series Equation(Numeric Form)')
    sympy.pprint((Time_Series['Numeric_Coeff_of_Y'] +
                  Time_Series['Numeric_Coeff_of_e']).subs(I, 1).subs(J, 1))
    Time_Series['Numeric_Coeff_of_Y'] = sympy.Poly(
        Time_Series['Numeric_Coeff_of_Y'], I).all_coeffs()[::-1]
    Time_Series['Numeric_Coeff_of_e'] = sympy.Poly(
        Time_Series['Numeric_Coeff_of_e'], J).all_coeffs()[::-1]

    final_coeffs = [[], []]
    print('\n* Y params')
    print(f'- TAR({trendparams[0]}) phi : {trendAR}')
    print(f'- TMA({trendparams[2]}) theta : {trendMA}')
    for i, (A_coeff_Y, N_coeff_Y) in enumerate(
            zip(Time_Series['Analytic_Coeff_of_Y'],
                Time_Series['Numeric_Coeff_of_Y'])):
        if i == 0:
            pass
        elif i != 0:
            A_coeff_Y = A_coeff_Y.subs(Y_[f"t-{i}"], 1)
            N_coeff_Y = N_coeff_Y.subs(Y_[f"t-{i}"], 1)
            print(f't-{i} : {A_coeff_Y} > {round(N_coeff_Y, 5)}')
            final_coeffs[0].append(N_coeff_Y)

    print('\n* e params')
    print(f'- SAR({seasonalparams[0]}) Phi : {seasonAR}')
    print(f'- SMA({seasonalparams[2]}) Theta : {seasonMA}')
    for i, (A_coeff_e, N_coeff_e) in enumerate(
            zip(Time_Series['Analytic_Coeff_of_e'],
                Time_Series['Numeric_Coeff_of_e'])):
        if i == 0:
            A_coeff_e = A_coeff_e.subs(e_[f"t"], 1)
            N_coeff_e = N_coeff_e.subs(e_[f"t"], 1)
            print(f't-{i} : {A_coeff_e} > {1}')

        elif i != 0:
            A_coeff_e = A_coeff_e.subs(e_[f"t-{i}"], 1)
            N_coeff_e = N_coeff_e.subs(e_[f"t-{i}"], 1)
            print(f't-{i} : {A_coeff_e} > {round(N_coeff_e, 5)}')
            final_coeffs[1].append(N_coeff_e)

    print('\n* Quasi-Convergence Factor')
    try:
        print(
            'Y :',
            sympy.tensorcontraction(
                sympy.Array(final_coeffs[0]).applyfunc(lambda x: x**2), (0, )))
        print(
            'e :',
            sympy.tensorcontraction(
                sympy.Array(final_coeffs[1]).applyfunc(lambda x: x**2), (0, )))
    except:
        pass

    _, axes = plt.subplots(5, 1, figsize=(12, 15))
    ar_params = np.array(final_coeffs[0])
    ma_params = np.array(final_coeffs[1])
    ar, ma = np.r_[1, -ar_params], np.r_[1, ma_params]
    y = smt.ArmaProcess(ar, ma).generate_sample(300, burnin=50)

    axes[0].plot(y, 'o-')
    axes[0].grid(True)

    axes[1].stem(smt.ArmaProcess(ar, ma).acf(lags=40))
    axes[1].set_xlim(-1, 41)
    axes[1].set_ylim(-1.1, 1.1)
    axes[1].set_title(
        "Theoretical autocorrelation function of an SARIMA process")
    axes[1].grid(True)

    axes[2].stem(smt.ArmaProcess(ar, ma).pacf(lags=40))
    axes[2].set_xlim(-1, 41)
    axes[2].set_ylim(-1.1, 1.1)
    axes[2].set_title(
        "Theoretical partial autocorrelation function of an SARIMA process")
    axes[2].grid(True)

    smt.graphics.plot_acf(y, lags=40, ax=axes[3])
    axes[3].set_xlim(-1, 41)
    axes[3].set_ylim(-1.1, 1.1)
    axes[3].set_title(
        "Experimental autocorrelation function of an SARIMA process")
    axes[3].grid(True)

    smt.graphics.plot_pacf(y, lags=40, ax=axes[4])
    axes[4].set_xlim(-1, 41)
    axes[4].set_ylim(-1.1, 1.1)
    axes[4].set_title(
        "Experimental partial autocorrelation function of an SARIMA process")
    axes[4].grid(True)

    plt.tight_layout()
    plt.show()