Example #1
def deriving_finite_difference_approximations(u, x, h, hs, order=1, errors=1):
    '''See also `1_2.py`.
    '''
    # the method of undetermined coefficients
    length = len(hs)
    hs = sympy.Matrix([hs])
    _hs = sympy.ones(1, length)
    A = sympy.ones(length)
    for ith in range(1, length):
        _hs = _hs.multiply_elementwise(hs)
        A[ith, :] = _hs
    b = sympy.zeros(length, 1)
    b[order] = sympy.factorial(order)
    coefficients = A.solve(b)
    # errors
    _errors = [None] * errors
    Du = u(x).diff(x, length-1)
    denominator = sympy.factorial(length-1)
    for ith in range(errors):
        denominator *= length + ith
        _hs = _hs.multiply_elementwise(hs)
        coefficient = coefficients.dot(_hs) / denominator
        Du = Du.diff()
        _errors[ith] = coefficient * h**(length+ith-order) * Du
    # finite difference approximations
    Du = sum(c*u(x+a*h) for c, a in zip(coefficients, hs))
    return Du/h**order, sum(_errors)
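A minimal usage sketch, assuming the function above is in scope together with sympy: stencil offsets (-1, 0, 1) around x recover the classic centred difference for the first derivative.

import sympy

u = sympy.Function('u')
x, h = sympy.symbols('x h', positive=True)
approx, error = deriving_finite_difference_approximations(u, x, h, [-1, 0, 1], order=1)
print(sympy.simplify(approx))  # (u(x + h) - u(x - h)) / (2*h), up to rearrangement
print(error)                   # leading truncation term: h**2 * Derivative(u(x), (x, 3)) / 6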
Example #2
def calculate_proba_assessment(assessment_rules):
    if (sym.ones(1, assessment_rules.shape[0]) * assessment_rules)[0, 0] == 4:
        proba_assessment = 1
    elif (sym.ones(1, assessment_rules.shape[0]) * assessment_rules)[0,
                                                                     0] == 3:
        proba_assessment = 1 - (proba_matrix_second_order *
                                (sym.ones(4, 1) - assessment_rules))[0, 0]
    else:
        proba_assessment = (proba_matrix_second_order * assessment_rules)[0, 0]
    return (proba_assessment)
Example #3
def main():
    a=Symbol("a", real=True)
    b=Symbol("b", real=True)
    c=Symbol("c", real=True)

    p = (a,b,c)

    assert u(p, 1).D * u(p, 2) == Matrix(1, 1, [0])
    assert u(p, 2).D * u(p, 1) == Matrix(1, 1, [0])

    p1,p2,p3 =[Symbol(x, real=True) for x in ["p1","p2","p3"]]
    pp1,pp2,pp3 =[Symbol(x, real=True) for x in ["pp1","pp2","pp3"]]
    k1,k2,k3 =[Symbol(x, real=True) for x in ["k1","k2","k3"]]
    kp1,kp2,kp3 =[Symbol(x, real=True) for x in ["kp1","kp2","kp3"]]

    p = (p1,p2,p3)
    pp = (pp1,pp2,pp3)

    k = (k1,k2,k3)
    kp = (kp1,kp2,kp3)

    mu = Symbol("mu")

    e = (pslash(p)+m*ones(4))*(pslash(k)-m*ones(4))
    f = pslash(p)+m*ones(4)
    g = pslash(p)-m*ones(4)


    #pprint(e)
    xprint( 'Tr(f*g)', Tr(f*g) )
    #print Tr(pslash(p) * pslash(k)).expand()

    M0 = [ ( v(pp, 1).D * mgamma(mu) * u(p, 1) ) * ( u(k, 1).D * mgamma(mu,True) * \
             v(kp, 1) ) for mu in range(4)]
    M = M0[0]+M0[1]+M0[2]+M0[3]
    M = M[0]
    assert isinstance(M, Basic)
    #print M
    #print simplify(M)

    d=Symbol("d", real=True) #d=E+m

    xprint('M', M)
    print "-"*40
    M = ((M.subs(E,d-m)).expand() * d**2 ).expand()
    xprint('M2', 1/(E+m)**2 * M)
    print "-"*40
    x,y= M.as_real_imag()
    xprint('Re(M)', x)
    xprint('Im(M)', y)
    e = x**2+y**2
    xprint('abs(M)**2', e)
    print "-"*40
    xprint('Expand(abs(M)**2)', e.expand())
Example #4
def main():
    a = Symbol("a", real=True)
    b = Symbol("b", real=True)
    c = Symbol("c", real=True)

    p = (a, b, c)

    assert u(p, 1).D * u(p, 2) == Matrix(1, 1, [0])
    assert u(p, 2).D * u(p, 1) == Matrix(1, 1, [0])

    p1, p2, p3 = [Symbol(x, real=True) for x in ["p1", "p2", "p3"]]
    pp1, pp2, pp3 = [Symbol(x, real=True) for x in ["pp1", "pp2", "pp3"]]
    k1, k2, k3 = [Symbol(x, real=True) for x in ["k1", "k2", "k3"]]
    kp1, kp2, kp3 = [Symbol(x, real=True) for x in ["kp1", "kp2", "kp3"]]

    p = (p1, p2, p3)
    pp = (pp1, pp2, pp3)

    k = (k1, k2, k3)
    kp = (kp1, kp2, kp3)

    mu = Symbol("mu")

    e = (pslash(p) + m * ones(4)) * (pslash(k) - m * ones(4))
    f = pslash(p) + m * ones(4)
    g = pslash(p) - m * ones(4)

    xprint("Tr(f*g)", Tr(f * g))

    M0 = [(v(pp, 1).D * mgamma(mu) * u(p, 1)) *
          (u(k, 1).D * mgamma(mu, True) * v(kp, 1)) for mu in range(4)]
    M = M0[0] + M0[1] + M0[2] + M0[3]
    M = M[0]
    if not isinstance(M, Basic):
        raise TypeError("Invalid type of variable")

    d = Symbol("d", real=True)  # d=E+m

    xprint("M", M)
    print("-" * 40)
    M = ((M.subs(E, d - m)).expand() * d**2).expand()
    xprint("M2", 1 / (E + m)**2 * M)
    print("-" * 40)
    x, y = M.as_real_imag()
    xprint("Re(M)", x)
    xprint("Im(M)", y)
    e = x**2 + y**2
    xprint("abs(M)**2", e)
    print("-" * 40)
    xprint("Expand(abs(M)**2)", e.expand())
Example #5
def test_creation():
    """
    Check that matrix dimensions can be specified using any reasonable type
    (see issue 1515).
    """
    raises(ValueError, 'zeros((3, 0))')
    raises(ValueError, 'zeros((1,2,3,4))')
    assert zeros(Integer(3)) == zeros(3)
    assert zeros(3.) == zeros(3)
    assert eye(Integer(3)) == eye(3)
    assert eye(3.) == eye(3)
    assert ones((Integer(3), Integer(4))) == ones((3, 4))
Example #6
    def run(self):
        '''refactor code later'''
        y = Matrix(self.y)
        x = Matrix(self.x)
        beta_sym = symbols(f'beta0:{self.beta.shape[1]}')  # BETA = tuple of length = 1 * p
        beta = Matrix(np.array(beta_sym))

        loss_func = (y - (x * beta)) * ones(beta.shape[1], 1)
        loss_func = ones(1, loss_func.shape[0]) * square_matrix_element_wise(loss_func) / 400  # mean
        # loss_func = ones(1, loss_func.shape[0]) * square_matrix_element_wise(loss_func)  # mean
        # loss_func = np.random.rand(1,1)

        penalty = self.lmda * (ones(1, beta.shape[0]) * square_matrix_element_wise(beta)) if self.l1 else 0
        return loss_func + penalty
Example #7
def test_creation_args():
    """
    Check that matrix dimensions can be specified using any reasonable type
    (see issue 1515).
    """
    raises(ValueError, 'zeros((3, -1))')
    raises(ValueError, 'zeros((1, 2, 3, 4))')
    assert zeros(Integer(3)) == zeros(3)
    assert zeros(3.) == zeros(3)
    assert eye(Integer(3)) == eye(3)
    assert eye(3.) == eye(3)
    assert ones((Integer(3), Integer(4))) == ones((3, 4))
    raises(TypeError, 'Matrix(1, 2)')
Example #8
def make_state_space_sym(num_signals, num_states, is_homo):

    mu_rho_dict_sym = {}
    state_state_sym_dict = {}

    mu_names = ['mu_'+str(i) for i in range(num_states)]
    rho_names = ['rho_'+str(i) for i in range(num_states)]
    mu_rho_names = mu_names + rho_names

    mu_names_sym = [sympy.Symbol(x) for x in mu_names]
    rho_names_sym = [sympy.Symbol(x) for x in rho_names]
    mu_rho_names_sym = [sympy.Symbol(x) for x in mu_rho_names]
    mu_rho_dict_sym.update(dict(zip(mu_rho_names, mu_rho_names_sym)))

    A_identity_sym = sympy.eye(num_states)
    A_rhoblock_sym = sympy.diag(*rho_names_sym)
    A_sym = sympy.diag(A_identity_sym, A_rhoblock_sym)

    D_sym_mu_part = sympy.ones(num_signals, num_states)
    D_sym_zeta_part = sympy.ones(num_signals, num_states)
    D_sym = D_sym_mu_part.row_join(D_sym_zeta_part)

    if is_homo:
        sigmas_signal_names = [
            'sigma_signal_'+str(i) for i in range(num_signals)]
        sigmas_state_names = ['sigma_state_'+str(i) for i in range(num_states)]
        sigmas_signal_sym = [sympy.Symbol(x) for x in sigmas_signal_names]
        sigmas_state_sym = [sympy.Symbol(x) for x in sigmas_state_names]

        C_nonsingularblock_sym = sympy.diag(*sigmas_state_sym)
        G_nonsingularblock_sym = sympy.diag(*sigmas_signal_sym)

        C_singularblock_sym = sympy.zeros(num_states, num_states)
        G_singularblock_sym = sympy.zeros(num_signals, num_states)

        G_sym = G_singularblock_sym.row_join(G_nonsingularblock_sym)
        C_sym = sympy.diag(C_singularblock_sym, C_nonsingularblock_sym)

    main_matrices_sym = {
        'A_z': A_sym, 'C_z': C_sym, 'D_s': D_sym, 'G_s': G_sym}
    sub_matrices_sym = {'A_z_stable': A_rhoblock_sym,
                        'C_z_nonsingular': C_nonsingularblock_sym,
                        'G_s_nonsingular': G_nonsingularblock_sym}

    state_state_sym_dict.update(main_matrices_sym)
    state_state_sym_dict.update(sub_matrices_sym)

    return state_state_sym_dict
Example #9
def hamilton(M):
    text = "Metoda Cayleya-Hamiltona<br>"
    eigen_values, eigen_vectors = eigen(M)
    text += 'Eigenvalues: $' + sp.latex(eigen_values) + ' = ' + sp.latex(
        eigen_values.evalf(5)) + '$<br>'
    eigen_values = eigen_values.evalf(5)

    x = sp.symbols(
        str(['a' + str(i)
             for i in range(len(eigen_values))])[1:-1].replace("'", ""))
    if len(eigen_values) == 1:
        x = [x]

    A = sp.ones(len(x), 1)

    for i in range(1, len(x)):
        A = sp.Matrix(sp.BlockMatrix([A, sp.HadamardPower(eigen_values, i)]))

    b = sp.Matrix(sp.HadamardPower(sp.E, eigen_values * t))
    result = sp.Matrix(list(sp.linsolve((A, b), *x))[0])
    text += "$"+sp.latex(b)+' = '+sp.latex(A)+'\\bullet'+sp.latex(sp.Matrix(x))+'\\implies' + \
            sp.latex(sp.Matrix(x))+' = '+sp.latex(result) + "$<br>"

    s = sp.diag(*tuple([1] * len(x))) * result[0, 0]
    for i in range(1, len(x)):
        s += M * result[i, 0]

    text += "$e^{\\mathbb{A}t} = " + sp.latex(sp.simplify(s)) + "$<br><br>"

    return s, text
Example #10
    def steady_state(self, start_state=None):
        # Initialize the probabilities for transitions to the same state
        matrix = self._fill_in_diagonal_transistions(self.matrix)

        # Subtract the identity matrix
        matrix = matrix - self._eye(matrix.rows)

        # Add a row at the bottom of the matrix for the equation that all
        # variable probabilities must add up to 1
        matrix = matrix.row_insert(matrix.rows, sympy.ones(1, matrix.cols))

        # Add a column for the target values
        matrix = matrix.col_insert(matrix.cols, sympy.zeros(matrix.rows, 1))
        matrix[matrix.rows-1, matrix.cols-1] = 1

        symbols = []
        for col in range(self._matrix_size(self.matrix)):
            symbols.append(sympy.Symbol("col_" + str(col)))
        solution = sympy.solve_linear_system(matrix, *symbols)

        result = {}
        for state in self.states:
            state_index = self.states[state]
            result[state] = solution[symbols[state_index]]
            
        return result
Example #11
def compute(mu, lmbda, rad, length):
    import sympy as smp
    from sympy import Matrix
    import numpy as np

    Tr  = lambda A: A[0, 0] + A[1, 1] + A[2, 2]
    Div = lambda A, b: Matrix(3, 3, lambda i, j: smp.diff(A[i,j], b[j], 1)) * smp.ones(A.shape[1], 1)

    l, m    = 1.25, 1.
    Id      = smp.eye(3)
    x, y, z = smp.symbols('x y z')
    u, v, w = [z*smp.exp(v/length) for v in [x, y, z]]
    grad_u  = Matrix([u, v, w]).jacobian(Matrix([x, y, z]))

    F       = smp.MatrixSymbol('F', 3, 3)
    C       = F.T*F
    E       = (C-Id)/2
    W       = 0.5*l*Tr(E)**2 + m*Tr(E*E)
    P       = Matrix(3, 3, lambda i, j: smp.diff(W, F[i, j], 1)).subs(F, grad_u + Id)
    f       = - Div(P, [x, y, z])

    u, v, w = smp.lambdify([x, y, z], u), smp.lambdify([x, y, z], v), smp.lambdify([x, y, z], w)

    return smp.lambdify([x, y, z], P), \
           smp.lambdify([x, y, z], f), \
           (lambda x_, y_, z_: np.array([u(x_, y_, z_), v(x_, y_, z_), w(x_, y_, z_)]).reshape(3,))
Example #12
def get_matrix_of_converted_atoms(Nu, positions, pending_conversion, natural_influence, Omicron, D):
    """
    :param Nu: A matrix with a shape=(<number of Matters in Universe>, <number of Atoms in Universe>) where
    each Nu[i,j] stands for how many atoms of type j in matter of type i.
    :type Nu: Matrix
    :param positions: positions of matters
    :type positions: [Matrix]
    :return:
    """

    x, y = symbols('x y')

    number_of_matters, number_of_atoms = Nu.shape

    M = zeros(0, number_of_atoms)

    if number_of_matters != len(positions):
        raise Exception("Parameters shapes mismatch.")

    for (i, position) in enumerate(positions):
        (a, b) = tuple(position)
        K = get_conversion_ratio_matrix(pending_conversion, Nu[i, :])
        M = M.col_join(((diag(*(ones(1, number_of_atoms)*diag(*K)*Omicron.transpose()))*D).transpose() *
                        natural_influence).transpose().subs({x: a, y: b}))

    return M.evalf()
Example #13
    def test_matrix_tensor_product():
        l1 = zeros(4)
        for i in range(16):
            l1[i] = 2**i
        l2 = zeros(4)
        for i in range(16):
            l2[i] = i
        l3 = zeros(2)
        for i in range(4):
            l3[i] = i
        vec = Matrix([1,2,3])

        #test for known 4x4 matrices
        numpyl1 = np.matrix(l1.tolist())
        numpyl2 = np.matrix(l2.tolist())
        numpy_product = np.kron(numpyl1,numpyl2)
        args = [l1, l2]
        sympy_product = matrix_tensor_product(*args)
        assert numpy_product.tolist() == sympy_product.tolist()
        numpy_product = np.kron(numpyl2,numpyl1)
        args = [l2, l1]
        sympy_product = matrix_tensor_product(*args)
        assert numpy_product.tolist() == sympy_product.tolist()

        #test for other known matrix of different dimensions
        numpyl2 = np.matrix(l3.tolist())
        numpy_product = np.kron(numpyl1,numpyl2)
        args = [l1, l3]
        sympy_product = matrix_tensor_product(*args)
        assert numpy_product.tolist() == sympy_product.tolist()
        numpy_product = np.kron(numpyl2,numpyl1)
        args = [l3, l1]
        sympy_product = matrix_tensor_product(*args)
        assert numpy_product.tolist() == sympy_product.tolist()

        #test for non square matrix
        numpyl2 = np.matrix(vec.tolist())
        numpy_product = np.kron(numpyl1,numpyl2)
        args = [l1, vec]
        sympy_product = matrix_tensor_product(*args)
        assert numpy_product.tolist() == sympy_product.tolist()
        numpy_product = np.kron(numpyl2,numpyl1)
        args = [vec, l1]
        sympy_product = matrix_tensor_product(*args)
        assert numpy_product.tolist() == sympy_product.tolist()

        #test for a random matrix with float values
        random_matrix1 = np.random.rand(np.random.randint(1, 6), np.random.randint(1, 6))
        random_matrix2 = np.random.rand(np.random.randint(1, 6), np.random.randint(1, 6))
        numpy_product = np.kron(random_matrix1, random_matrix2)
        args = [Matrix(random_matrix1.tolist()), Matrix(random_matrix2.tolist())]
        sympy_product = matrix_tensor_product(*args)
        assert np.allclose(np.array(sympy_product.tolist(), dtype=float),
                           numpy_product)

        #test for three matrix kronecker
        sympy_product = matrix_tensor_product(l1,vec,l2)

        numpy_product = np.kron(l1,np.kron(vec,l2))
        assert numpy_product.tolist() == sympy_product.tolist()
Example #14
def met_zeyd(A, b):
    C, d = iteration_view(A, b)
    H = sympy.zeros(3)
    F = sympy.zeros(3)
    c = sympy.zeros(3,1)
    for i in range(3):
        c[i] = d[i]
        for j in range(3):
            if i > j:
                H[i, j] = C[i, j]
            else:
                F[i, j] = C[i, j]
    print("\nx = Cx + d\n")
    print("C = \n", C, "\n", "\nd = \n", d)
    print("\nConvergence: ", convergence_mzeyd(C, d))
    if convergence_mzeyd(C, d):
        E = sympy.eye(3)
        x0 = sympy.ones(3, 1)
        x1 = (E-H).inv()*F*x0 + (E-H).inv()*c
        while ((x1-x0)[0] > 0.00001 or (x1-x0)[1] > 0.00001 or\
              (x1-x0)[2] > 0.00001 or (x0-x1)[0] > 0.00001 or\
              (x0-x1)[1] > 0.00001 or (x0-x1)[2] > 0.00001):
              x0 = x1
              x1 = (E-H).inv()*F*x0 + (E-H).inv()*c
        print "\nSolution:" 

    return [element for element in x1]
Example #15
def cholesky(A):
    """
    # A is positive definite mxm
    """
    assert A.shape[0] == A.shape[1]
    # assert all(A.eigenvals() > 0)
    m = A.shape[0]
    N = deepcopy(A)
    D = ones(*A.shape)
    for i in range(m - 1):
        for j in range(i + 1, m):
            N[j, i] = N[i, j]
            D[j, i] = D[i, j]
            n, d = ratior(N[i, j], D[i, j], N[i, i], D[i, i])
            N[i, j], D[i, j] = n, d
            if verbose_chol:
                print("i={}, j={}".format(i + 1, j + 1))
                print("N:")
                printnp(N)
                print("D:")
                printnp(D)
        for k in range(i + 1, m):
            for l in range(k, m):
                n, d = multr(N[k, i], D[k, i], N[i, l], D[i, l])
                N[k, l], D[k, l] = subr(N[k, l], D[k, l], n, d)
                if verbose_chol:
                    print("k={}, l={}".format(k + 1, l + 1))
                    print("N:")
                    printnp(N)
                    print("D:")
                    printnp(D)
    return N, D
Example #16
            def recursive_com(link, frame):
                """
                Recursive function to compute the center of mass
                Parameters
                ----------
                link : links.Link
                    Link from which you want to compute the center of mass.

                frame : links.link
                    Frame in which you want to express the CoM

                Returns
                -------

                com : sympy.matrices.dense.MutableDenseMatrix
                    Center of mass of the link, expressed in the given frame

                """
                m = link.mass / self.mass
                hc = ones(4, 1)
                hc[:3, :] = link.com
                if link.is_root:
                    T = eye(4, 4)
                else:
                    T = self.forward_kinematics(
                        f"link_{frame.link_id}", f"joint_"
                        f"{link.child_joints[0]}")
                cm = m * (T @ hc)
                if link.is_terminal:
                    return cm
                else:
                    for child in link.parent_joints:
                        cm += recursive_com(
                            self.links[self.joints[child].child], frame)
                    return cm
Example #17
 def relaxation_local(self, m, with_rel_velocity = False):
     if with_rel_velocity:
         eq = (self.Tu*self.eq).subs(list(zip(self.mv, m)))
     else:
         eq = self.eq.subs(list(zip(self.mv, m)))
     relax = (sp.ones(*self.s.shape) - self.s).multiply_elementwise(sp.Matrix(m)) + self.s.multiply_elementwise(eq)
     alltogether(relax)
     return Eq(m, relax)
Example #18
def initialise_working_matrices(G):
    """  G is a nonzero matrix with at least two rows.  """
    B = eye(G.shape[0])
    # Lower triang matrix
    L = zeros(G.shape[0], G.shape[0])
    D = ones(G.shape[0] + 1, 1)
    A = Matrix(G)
    return A, B, L, D
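A small usage sketch, assuming initialise_working_matrices() above is in scope along with sympy's Matrix, eye, zeros and ones:

from sympy import Matrix

G = Matrix([[1, 2], [3, 4]])
A, B, L, D = initialise_working_matrices(G)
print(B)    # 2x2 identity
print(L)    # 2x2 zero matrix
print(D.T)  # row of three ones (one extra entry beyond the number of rows of G)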
Example #19
    def transform_col_vector(V):
        print(V.shape)
        ones_vector = sp.ones(1, V.shape[0])
        print(ones_vector.shape)

        V = V * ones_vector
        V = V.T
        return V
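What the helper above does, as a self-contained sketch: tile a column vector so that every row of the result repeats the vector.

import sympy as sp

V = sp.Matrix([1, 2, 3])
tiled = (V * sp.ones(1, V.shape[0])).T
print(tiled)  # Matrix([[1, 2, 3], [1, 2, 3], [1, 2, 3]])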
Example #20
    def calc_weights(self, lhs_matrix, num_columns):
        """Calculate log-mean divisia weights

        Args:
            lhs_matrix (Symbolic Matrix): Matrix representing the LHS variable
                                          (the variable to decompose) symbolically

        Returns:
            weights (Symbolic Matrix): The log-mean divisia weights
        """
        lhs_total = lhs_matrix * sp.ones(lhs_matrix.shape[1], 1)
        print('lhs_total:', lhs_total)
        lhs_total = self.transform_col_vector(lhs_total)
        print('lhs_total tiled:', lhs_total)

        sp.pprint(lhs_total)
        print(lhs_total.shape)
        sp.pprint(lhs_matrix)
        print(lhs_matrix.shape)
        # lhs_total = sp.HadamardPower(lhs_total, -1)
        lhs_share = lhs_total * lhs_matrix
        print(lhs_share.shape)

        shift_matrix, long_matrix = self.shift_matrices(lhs_matrix)
        shift_share, long_share = self.shift_matrices(lhs_share)

        log_mean_matrix = self.logarithmic_average(long_matrix,
                                                   shift_matrix)
        log_mean_matrix_total = log_mean_matrix * sp.ones(
                                            log_mean_matrix.shape[1], 1)

        log_mean_share = self.logarithmic_average(long_share,
                                                  shift_share)
        log_mean_share_total = log_mean_share * sp.ones(
                                            log_mean_share.shape[1], 1)

        if self.model == 'additive':
            weights = self.additive_weights(log_mean_matrix,
                                            log_mean_matrix_total,
                                            log_mean_share_total)
        elif self.model == 'multiplicative':
            weights = self.multiplicative_weights(log_mean_matrix,
                                                  log_mean_share,
                                                  log_mean_share_total,
                                                  log_mean_matrix_total)
        return weights
Example #22
def elementary_weight(tree,s,arrays,method):
    """
        Constructs elementary weights for a Volterra Runge-Kutta method,
        supposing the row sum condition.
        The output needs to be multiplied by b^T and equated to LHS
        to obtain the order condition.
        
        It is used by gen_order_conditions in vrk_methods
        
        INPUT:
            
        - tree   -- input tree, must be a RootedTree.
        - s      -- number of stages
        - arrays -- depends on the method; a list containing two, three or four
                    arrays, in the order c, A, e, D (if VRK), c, A, d (if BVRK)
                    or c, A (if PVRK)
        - method -- which type of method to use: VRK, BVRK or PVRK; for a PVRK
                    method the tree must be created with the xa='a' flag.

        OUTPUT: a column vector of type numpy.ndarray,
                e.g. array([[],...,[]], dtype=object)

    """
    from sympy import eye, ones
    if tree=='': return ''    
    
    u=np.array(ones((s,1)))   #np.ones((s, 1), dtype=np.int)
    if tree=='a': return u # Matrix(u)

    I=np.array(eye(s))    # np.eye(s, dtype=np.int)
    ew=u.copy()
    
    c=arrays[0]
    A=arrays[1]
    if method=='VRK':
        d=arrays[2]
        D=arrays[3]
    elif method=='BVRK':
        d=arrays[2]
    else: #method=='PVRK'
        d=arrays[0]
    
    
    
    nx,na,subtrees=tree._parse_subtrees()
    ew*=c**na #na and nx can also be zero, then we have a vector of ones.
    ew*=d**nx
    
    # Two curly brackets contain at least one symbol, 'a' or 'x', i.e. {} is not a leaf
    if len(subtrees)>0:
        for subtree in subtrees:
            if method=='VRK':
                ew=ew*_elem_weight_sub3(subtree,method,c,D,A,I,u)
            else:
                ew=ew*_elem_weight_sub3(subtree,method,c,d,A,I,u)
    
    return ew #returns a column
Example #23
def exp_so3_symbols(rotvec):
    epsilons = EPSILON * sympy.ones(3, 1)
    # add 'epsilons' to 'rotvec' directly
    # ZeroDivision occurs in grad calculation if we add EPSILON to 'theta'
    # in the denominator
    theta = Matrix(rotvec + epsilons).norm()
    K = tangent_so3(rotvec / theta)
    I = sympy.eye(3)
    return I + sympy.sin(theta) * K + (1-sympy.cos(theta)) * K * K
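A self-contained check of the Rodrigues formula used above, with a hand-written tangent_so3 (a hypothetical stand-in for the helper imported by the snippet) and the epsilon regularisation dropped:

import sympy
from sympy import Matrix

def tangent_so3(v):
    # skew-symmetric matrix such that tangent_so3(v) * w == cross(v, w)
    return Matrix([[0, -v[2], v[1]],
                   [v[2], 0, -v[0]],
                   [-v[1], v[0], 0]])

theta = sympy.Symbol('theta', positive=True)
rotvec = Matrix([theta, 0, 0])  # rotation about the x axis
t = rotvec.norm()
K = tangent_so3(rotvec / t)
R = sympy.eye(3) + sympy.sin(t) * K + (1 - sympy.cos(t)) * K * K
print(sympy.simplify(R))  # the familiar rotation matrix about x by angle theta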
Example #24
def dirac():
    A = 0.1974  # Fermi velocity

    kx = sp.Symbol('kx', real=True)
    ky = sp.Symbol('ky', real=True)

    dx = -ky / (2 * (kx**2 + ky**2))
    dy = kx / (2 * (kx**2 + ky**2))

    prex = dx * sp.ones(2, 2)
    prey = dy * sp.ones(2, 2)

    dirac_system = cued.hamiltonian.BiTeBandstructure(vF=A,
                                                      prefac_x=prex,
                                                      prefac_y=prey,
                                                      flag='dipole')

    return dirac_system
Example #25
def construct_beta(Kbeta):
    # symbol for lowercase t (i.e. the time point within the follow-up period)
    t = sy.Symbol("t")
    # symbol for uppercase T (i.e. the follow-up duration)
    s = sy.Symbol("s")
    syPhi = sy.ones(Kbeta, 1)
    syb = sy.ones(1, Kbeta)
    b = [[] for k in range(Kbeta)]
    v = [np.arange(np.sqrt(Kbeta)), np.arange(np.sqrt(Kbeta))]
    expo = cg.expandnp(v)
    for x in range(len(expo[:, 0])):
        syPhi[x] = (t ** expo[x, 0]) * (s ** expo[x, 1])
        syb[x] = sy.Symbol("b" + str(x))
        b[x] = sy.Symbol("b" + str(x))
    syBeta = syb * syPhi
    syBeta = syBeta[0, 0]
    arg = [t, s] + b
    Beta_fonc_est = sy.lambdify(tuple(arg), syBeta, "numpy")
    return Beta_fonc_est
Example #26
def _pressure_tensor(grid):
    press = [Symbol('flux[%d]' % i) for i in range(grid.dim * (grid.dim + 1) // 2)]
    P = sympy.ones(grid.dim)  # P_ab - rho cs^2 \delta_ab
    k = 0
    for i in range(grid.dim):
        for j in range(i, grid.dim):
            P[i, j] = press[k]
            P[j, i] = press[k]
            k += 1
    return P
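A quick usage sketch, assuming _pressure_tensor() above is in scope (with sympy and Symbol imported as in its module); any object exposing a `dim` attribute stands in for the grid here.

import types

grid = types.SimpleNamespace(dim=2)
print(_pressure_tensor(grid))  # Matrix([[flux[0], flux[1]], [flux[1], flux[2]]])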
Example #28
def expectation(lam):
    p = Matrix([[-prob(i, j, lam) for j in range(k)] for i in range(k)])
    for i in range(k):
        p[(k + 1) * i] = -sum(p.row(i)) + (Fraction(
            lam, n))**(k - i) * (Fraction(n - lam, n))**(n - k + i)
    return next(
        iter(
            linsolve(
                (p, ones(k, 1)),
                symbols(', '.join(['x_{}'.format(i) for i in range(k)])))))[0]
Example #29
    def BodyVelocity(self):
        V = sym.ones(6, 1)
        V[:3, 0] = self.R.T * self.t.diff(t)

        # Angular velocity skew-symmetric matrix
        S = self.R.T * self.R.diff(t)
        V[3, 0] = S[2, 1]
        V[4, 0] = S[0, 2]
        V[5, 0] = S[1, 0]
        return V
Example #30
File: PH.py Project: goujou/LAPM
def z(A):
    """Return the (symbolic) vector of rates toward absorbing state.

    Args:
        A (SymPy dxd-matrix): compartment matrix

    Returns:
        SymPy dx1-matrix: :math:`\\bf{z} = -B^T\\,\\bf{1}`
    """
    o = ones(A.rows, 1)
    return -A.transpose()*o
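A quick numeric check of the formula above, assuming z() and sympy's ones are in scope: for a hypothetical 2-compartment matrix, -A^T 1 recovers the negative column sums, i.e. the outflow rates toward the absorbing state.

from sympy import Matrix

A = Matrix([[-3, 1],
            [ 2, -4]])
print(z(A))  # Matrix([[1], [3]])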
Example #31
def z(B):
    """Return the (symbolic) vector of rates toward absorbing state.

    Args:
        B (SymPy dxd-matrix): compartment matrix

    Returns:
        SymPy dx1-matrix: :math:`z = -B^T\\,\\mathbf{1}`
    """
    o = ones(B.rows, 1)
    return -B.transpose() * o
Example #32
    def weighted_term(weight, term):
        """Calculate components from dot product of weights and term

        Args:
            weight (MatrixSymbol): normalized log-mean divisia weights
            term (MatrixSymbol): The effect of the component (RHS) variable
                                 on the change in the LHS variable
        """
        weighted_term = sp.matrix_multiply_elementwise(weight, term).doit()
        ones_ = sp.ones(weighted_term.shape[1], 1)
        component = weighted_term * ones_
        return component
Example #33
    def run_batch(self, loop_num):
        # check if learned val is one of learnable param in func
        i = loop_num
        s = (i) * self.bs
        f = (i + 1) * self.bs if (i + 1) * self.bs < self.x.shape[0] + 1 else self.x.shape[0] + 1
        #--------create matrixsymbol and pass in value of matrix

        # y = MatrixSymbol('y', self.y[s:f].shape[0],self.y[s:f].shape[1] )
        # x = MatrixSymbol('x', self.x[s:f].shape[0], self.x[s:f].shape[1])
        y = Matrix(self.y[s:f])
        x = Matrix(self.x[s:f])
        beta_sym = symbols(f'beta0:{self.beta.shape[1]}')  # BETA = tuple of length = 1 * p
        beta = Matrix(np.array(beta_sym))

        loss_func = (y - (x * beta)) * ones(beta.shape[1], 1)
        loss_func = ones(1, loss_func.shape[0]) * square_matrix_element_wise(loss_func)
        # loss_func = ones(1, loss_func.shape[0]) * square_matrix_element_wise(loss_func)  # mean
        # loss_func = np.random.rand(1,1)

        penalty =  self.lmda * (ones(1, beta.shape[0]) * square_matrix_element_wise(beta)) if  self.l1 else 0
        return loss_func + penalty
Example #34
File: disp.py Project: vikramsg/PDE
 def lagrange(self, nodes):
     """
     Lagrange polynomial 
     """
     length = len(nodes)
     r = sympy.Symbol('r')
     phi = sympy.ones(1, length)
     for k in range(length):
         for l in range(length):
             if (k != l):
                 phi[k] *= (r - nodes[l])/(nodes[k] - nodes[l])
     return phi 
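A minimal check of the construction above, rewritten as free-standing code over the nodes (-1, 0, 1) and assuming only sympy: each basis polynomial equals 1 at its own node and 0 at the others.

import sympy

nodes = [-1, 0, 1]
r = sympy.Symbol('r')
phi = sympy.ones(1, len(nodes))
for k in range(len(nodes)):
    for l in range(len(nodes)):
        if k != l:
            phi[k] *= (r - nodes[l]) / (nodes[k] - nodes[l])
assert all(phi[k].subs(r, nodes[j]) == (1 if j == k else 0)
           for k in range(len(nodes)) for j in range(len(nodes)))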
Example #35
def test_zeros_ones_fill():
    n, m = 3, 5

    a = zeros( (n, m) )
    a.fill( 5 )

    b = 5 * ones( (n, m) )

    assert a == b
    assert a.rows == b.rows == 3
    assert a.cols == b.cols == 5
    assert a.shape == b.shape == (3, 5)
Example #36
def test_speed():
    max_nodes = 20
    min_nodes = 1
    n = min_nodes

    times = []
    while n <= max_nodes:
        nodes = list(range(n))
        G_bus = sp.ones(n)
        B_bus = sp.ones(n)

        t_start = time.time()

        F, Jaco, Fxx, Jxx, VM, VA, P, Q, Node_index = create_powermismatch(G_bus, B_bus, nodes)

        times.append(time.time() - t_start)

        n+=1

    plt.plot(range(min_nodes, max_nodes+1), times, 'o')
    plt.show()
Example #37
def test_zeros_ones_fill():
    n, m = 3, 5

    a = zeros((n, m))
    a.fill(5)

    b = 5 * ones((n, m))

    assert a == b
    assert a.lines == b.lines == 3
    assert a.cols == b.cols == 5
    assert a.shape == b.shape == (3, 5)
Example #38
    def __init__(self, DH):  #DH = [d,a,theta,alpha]
        assert len(DH) == 4
        dh_ = list()

        type_ = type(DH[0])

        for i in range(len(DH)):
            if not isinstance(DH[i], type_):
                sys.exit(
                    "Denavit Hartenberg parameters must be from same type per joint"
                )

            if isinstance(DH[i], str):  #
                dh_.append(sp.Symbol(DH[i]))  #string
            else:
                if i == 2 or i == 3:
                    dh_.append(DH[i] * np.pi / 180)  #number
                else:
                    dh_.append(DH[i])

        if type_ is str:
            self.H = sp.Matrix(sp.ones(4, 4))
            c = sp.cos
            s = sp.sin
        else:
            self.H = np.matrix(np.ones((4, 4)))
            c = np.cos
            s = np.sin

        #First Row
        self.H[0, 0] = c(dh_[2])
        self.H[0, 1] = -s(dh_[2]) * c(dh_[3])
        self.H[0, 2] = s(dh_[2]) * s(dh_[3])
        self.H[0, 3] = c(dh_[1]) * dh_[1]

        #Second Row
        self.H[1, 0] = s(dh_[2])
        self.H[1, 1] = c(dh_[2]) * c(dh_[3])
        self.H[1, 2] = -c(dh_[2]) * s(dh_[3])
        self.H[1, 3] = s(dh_[2]) * dh_[1]

        #Third Row
        self.H[2, 0] = 0
        self.H[2, 1] = s(dh_[3])
        self.H[2, 2] = c(dh_[3])
        self.H[2, 3] = dh_[0]
        #Fourth Row
        self.H[3, 0] = 0
        self.H[3, 1] = 0
        self.H[3, 2] = 0
        self.H[3, 3] = 1
Example #39
def Pre_Comp_YX(L, T, Xdata, Y, Kbeta, J):
    t = sy.Symbol("t")
    s = sy.Symbol("s")
    # Retrieve the variables and parameters
    N = len(L)
    D = len(L[0])
    # ----------------- Build the functional basis
    syPhi = sy.ones(Kbeta ** 2, 1)
    syb = sy.ones(1, Kbeta ** 2)
    v = [np.arange(Kbeta), np.arange(Kbeta)]
    expo = cg.expandnp(v)
    Phi_fonc = [[] for j in range(Kbeta ** 2)]
    for x in range(len(expo[:, 0])):
        syPhi[x] = (t ** expo[x, 0]) * (s ** expo[x, 1])
        Phi_fonc[x] = sy.lambdify((t, s), syPhi[x], "numpy")
        syb[x] = sy.Symbol("b" + str(x))
    syBeta = syb * syPhi
    Phi_mat = Comp_Phi(Phi_fonc, T, J)
    I_pen = J22_fast(syPhi, np.max(T), 50)[3]
    # ----------------- Build the kernels and their derivatives
    # Build the kernel form
    el1 = sy.Symbol("el1")
    per1 = sy.Symbol("per1")
    sig1 = sy.Symbol("sig1")
    args1 = [el1, per1, sig1]
    el2 = sy.Symbol("el2")
    sig2 = sy.Symbol("sig2")
    args2 = [el2, sig2]
    syk = cg.sy_Periodic((s, t), *args1) + cg.sy_RBF((s, t), *args2)
    args = [t, s] + args1 + args2
    # Differentiate and build the associated vectorized functions
    k_fonc = sy.lambdify(tuple(args), syk, "numpy")
    n_par = len(args) - 2
    k_der = [[] for i in range(n_par)]
    for i in range(n_par):
        func = syk.diff(args[i + 2])
        k_der[i] = sy.lambdify(tuple(args), func, "numpy")
    return (Phi_mat, k_fonc, k_der, I_pen)
Example #40
def lambdifyz(symbols, expr, modules="numpy"):
    """
    Circumvents a bug in lambdify where arguments that do not appear
    in the expression are simplified away and therefore
    aren't broadcast. https://github.com/sympy/sympy/issues/5642
    Pass an extra argument = 0 for z when calling the result.
    """
    assert isinstance(expr, sp.Matrix)
    z = sp.symbols("z")
    thesymbols = list(symbols)
    thesymbols.append(z)
    exprz = expr + z * sp.prod(symbols) * sp.ones(expr.shape[0], expr.shape[1])
    fz = sp.lambdify(thesymbols, exprz, modules=modules)
    return fz
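A usage sketch of the workaround: without the extra z term the constant entry below would not broadcast against array input; with it, both rows come back with the same length (pass 0 for the dummy argument).

import numpy as np
import sympy as sp

x, y = sp.symbols('x y')
expr = sp.Matrix([x * y, sp.Integer(1)])  # second entry contains neither x nor y
f = lambdifyz((x, y), expr)
xs = np.linspace(0.0, 1.0, 5)
print(f(xs, xs, 0).shape)  # (2, 1, 5): the constant row is broadcast too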
Example #41
File: PH.py Project: goujou/LAPM
def cum_dist_func(beta, A, Qt):
    """Return the (symbolic) cumulative distribution function of phase-type.

    Args:
        beta (SymPy dx1-matrix): initial distribution vector
        A (SymPy dxd-matrix): transition rate matrix
        Qt (SymPy dxd-matrix): Qt = :math:`e^{t\\,\\bf{A}}`

    Returns:
        SymPy expression: cumulative distribution function of PH(:math:`\\bf{\\beta}`, :math:`\\bf{A}`)
            :math:`F_T(t) = 1 - \\bf{1}^T\\,e^{t\\,\\bf{A}}\\,\\bf{\\beta}`
    """
    o = ones(1, A.cols)
    return 1 - (o * (Qt * beta))[0]
Example #42
File: PH.py Project: goujou/LAPM
def nth_moment(beta, A, n):
    """Return the (symbolic) nth moment of the phase-type distribution.

    Args:
        beta (SymPy dx1-matrix): initial distribution vector
        A (SymPy dxd-matrix): transition rate matrix
        n (positive int): order of the moment
    
    Returns:
        SymPy expression: nth moment of PH(:math:`\\bf{\\beta}`, :math:`\\bf{A}`)
            :math:`\\mathbb{E}[T^n] = (-1)^n\\,n!\\,\\bf{1}^T\\,\\bf{A}^{-n}\\,\\bf{\\beta}`
    """
    o = ones(1, A.cols)
    return ((-1)**n*factorial(n)*o*(A**-n)*beta)[0]
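A quick sanity check, assuming nth_moment() above is in scope (together with its module's ones and factorial): a single-compartment PH(beta, A) with A = [[-lambda]] is the exponential distribution, whose nth moment is n!/lambda**n.

import sympy
from sympy import Matrix, Symbol

lam = Symbol('lambda', positive=True)
beta = Matrix([1])
A = Matrix([[-lam]])
print(sympy.simplify(nth_moment(beta, A, 2)))  # 2/lambda**2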
Example #43
    def __init__(self, times, A0, k1, kinv, k2, using_sympy=False):
        """Generates baseline/true dataset of C concentration over time based on the provided parameters

        Args:
            times (array): times at which to evaluate C throughout calculations (should never be changed after being set)
            A0 (float): initial value of concentratin of A (should never be changed after being set)
            k1 (float): true value of parameter k1
            kinv (float): true value of parameter kinv
            k2 (float): true value of parameter k2
            using_sympy (bool): whether or not to generate gradient and hessian functions of the ob. fn. from symbolic representations
        """
        self._A0 = A0
        self._TIMES = times
        self._Cs = self.gen_timecourse(k1, kinv, k2)

        self.sympy_lsq_of_f = None
        self.sympy_lsq_of_gradient = None
        self.sympy_lsq_of_hessian = None
        if using_sympy:
            # set up sympy objective function
            k1, kinv, k2 = sympy.symbols('k1,kinv,k2')
            alpha = (k1 + kinv + k2 +
                     ((k1 + kinv + k2)**2 - 4 * k1 * k2)**0.5) / 2
            beta = (k1 + kinv + k2 -
                    ((k1 + kinv + k2)**2 - 4 * k1 * k2)**0.5) / 2
            sympy_times = sympy.Matrix(times).transpose()
            Cs_true = sympy.Matrix(self._Cs).transpose()
            # Cs = sympy.zeros(times.shape[0], 1)
            ones = sympy.ones(
                1, times.shape[0]
            )  # for adding the time-independent term at each time
            Cs = self._A0 * (
                ones * k1 * k2 / (alpha * beta) +
                (-alpha * sympy_times).applyfunc(sympy.exp) * k1 * k2 /
                (alpha * (alpha - beta)) -
                (-beta * sympy_times).applyfunc(sympy.exp) * k1 * k2 /
                (beta * (alpha - beta)))
            # # if we've already constructed and compiled the functions from sympy, just reload
            # # jk doesn't work
            # symbolic_lsq_of = None
            # if isfile("./data/symbolic-of.dill"):
            #     symbolic_lsq_of = dill.load(open("./data/symbolic-of.dill", 'r'))
            # else:
            # symbolic_lsq_of = ObjectiveFunction(sum((Cs - Cs_true).applyfunc(lambda x:x*x)), [k1, kinv, k2])
            # dill.dump(symbolic_lsq_of, open("./data/symbolic-of.dill", 'w'))
            symbolic_lsq_of = ObjectiveFunction(
                sum((Cs - Cs_true).applyfunc(lambda x: x * x)), [k1, kinv, k2])
            self.sympy_lsq_of_f = symbolic_lsq_of.f
            self.sympy_lsq_of_gradient = symbolic_lsq_of.gradient
            self.sympy_lsq_of_hessian = symbolic_lsq_of.hessian
Example #44
def getMatElemElasticity(dim=2):
    eps_ii = lamb*sympy.ones(dim, dim) + 2*mu*sympy.eye(dim)
    dimC = dim + 1 if dim==2 else 2*dim
    C = sympy.zeros(dimC, dimC)
    C[:dim, :dim] = eps_ii

    if dim == 2:
        C[-1, -1] = mu
    elif dim == 3:
        for i in range(1, dim+1):
            C[-i, -i] = mu

    phi = basis_function[dim]

    B = sympy.zeros(C.shape[0], dim*len(phi))

    deriv = [x, y, z]

    for j in range(dim):
        for i in range(len(phi)):
            B[j, dim*i + j] = phi[i].diff(deriv[j])
    
    if dim == 2:
        for i in range(len(phi)):
            B[-1, dim*i] = phi[i].diff(y)
            B[-1, dim*i + 1] = phi[i].diff(x)
    elif dim == 3:
        for i in range(len(phi)):
            B[-3, dim*i] = phi[i].diff(y)
            B[-3, dim*i + 1] = phi[i].diff(x)
            B[-2, dim*i] = phi[i].diff(z)
            B[-2, dim*i + 2] = phi[i].diff(x)
            B[-1, dim*i + 1] = phi[i].diff(z)
            B[-1, dim*i + 2] = phi[i].diff(y)

    A = B.T*C*B

    output = sympy.zeros(dim*len(phi), dim*len(phi))
    for i in range(A.shape[0]):
        for j in range(A.shape[1]):
            #A[i, j] = A[i, j].together().factor()
            if dim == 2:
                output[i, j] = sympy.integrate(A[i, j], (y, 0., hy), (x, 0., hx)).expand()
            elif dim == 3:
                output[i, j] = sympy.integrate(A[i, j], (z, 0., hz), (y, 0., hy), (x, 0., hx)).expand()
            output[i, j].together().factor()

    #return sympy.lambdify((hx, hy, hz, lamb, mu), output, "numpy")
    return output
Example #45
 def basis(self):
     dofs = self.number_of_dofs()
     phi = sp.ones(1, dofs)
     p = self.p
     phi[1] = self.x / self.h
     phi[2] = self.y / self.h
     if p > 1:
         start = 3
         for i in range(2, p + 1):
             for j, k in zip(range(start, start + i),
                             range(start - i, start)):
                 phi[j] = phi[k] * phi[1]
             phi[start + i] = phi[start - 1] * phi[2]
             start += i + 1
     return phi
Example #46
 def basis(self, p=None):
     p = self.p if p is None else p
     dofs = self.number_of_dofs(p=p)
     phi = sp.ones(1, dofs)
     phi[1] = (self.x - self.barycenter[0]) / self.h
     phi[2] = (self.y - self.barycenter[1]) / self.h
     if p > 1:
         start = 3
         for i in range(2, p + 1):
             for j, k in zip(range(start, start + i),
                             range(start - i, start)):
                 phi[j] = phi[k] * phi[1]
             phi[start + i] = phi[start - 1] * phi[2]
             start += i + 1
     return phi
Example #47
def cholesky(A):
    """
    # A is positive definite mxm
    """
    assert A.shape[0] == A.shape[1]
    # assert all(A.eigenvals() > 0)
    m = A.shape[0]
    N = deepcopy(A)
    D = ones(*A.shape)
    for i in range(m - 1):
        for j in range(i + 1, m):
            N[j, i] = N[i, j]
            D[j, i] = D[i, j]
            n, d = ratior(N[i, j], D[i, j], N[i, i], D[i, i])
            N[i, j], D[i, j] = n, d
        for k in range(i + 1, m):
            for l in range(k, m):
                n, d = multr(N[k, i], D[k, i], N[i, l], D[i, l])
                N[k, l], D[k, l] = subr(N[k, l], D[k, l], n, d)
    return N, D
Example #48
File: disp.py Project: vikramsg/PDE
 def lagrangeDeri(self, nodes):
     """
     Lagrange matrix at the nodes is just an Identity
     We'll come back to interpolation at points other than nodes
     at a later time
     Here we create derivative operator at the nodes
     Lagrange polynomial is
     phi = Product(l, l.neq.k) (r - r_l)/(r_k - r_l)
     r_i are the nodes
     """
     length = len(nodes)
     r = sympy.Symbol('r')
     phi = sympy.ones(1, length)
     dPhi = sympy.zeros(length, length)
     for k in range(length):
         for l in range(length):
             if (k != l):
                 phi[k] *= (r - nodes[l])/(nodes[k] - nodes[l])
     for k in range(length):
         for l in range(length):
             dPhi[k, l] = sympy.diff(phi[l]).evalf(subs = {r: nodes[k]})
     return dPhi
Example #49
    def relaxation_local(self, m, with_rel_velocity=False):
        """
        Return symbolic expression which computes the relaxation operator.

        Parameters
        ----------

        m : SymPy Matrix
            indexed objects for the moments

        with_rel_velocity : boolean
            check if the scheme uses relative velocity.
            (default is False)

        """
        if with_rel_velocity:
            eq = (self.Tu*self.eq).subs(list(zip(self.mv, m)))
        else:
            eq = self.eq.subs(list(zip(self.mv, m)))
        relax = (sp.ones(*self.s.shape) - self.s).multiply_elementwise(sp.Matrix(m)) + self.s.multiply_elementwise(eq)
        alltogether(relax)
        return Eq(m, relax)
Example #50
def J22(syPhi, Tmax):
    # size of the functional basis
    Kbeta = len(syPhi)
    # symbol for lowercase t (i.e. the time point within the follow-up period)
    t = sy.Symbol("t")
    # symbol for uppercase T (i.e. the follow-up duration)
    s = sy.Symbol("s")
    deb = time.perf_counter()
    Phi = sy.ones(Kbeta, 1)
    for i in range(Kbeta):
        Phi[i] = syPhi[i]
    Phi_dsds = Phi.diff(s, s)
    Phi_dsdt = Phi.diff(s, t)
    Phi_dtdt = Phi.diff(t, t)

    Is = np.zeros((Kbeta, Kbeta), float)
    Ic = np.zeros((Kbeta, Kbeta), float)
    It = np.zeros((Kbeta, Kbeta), float)

    def gfun(x):
        return 0

    def hfun(x):
        return x

    for i in range(Kbeta):
        for j in range(Kbeta):
            func = sy.lambdify((t, s), Phi_dsds[j] * Phi_dsds[i], "numpy")
            Is[i, j] = integrate.dblquad(func, 0.0, Tmax, gfun, hfun, epsabs=5e-03)[0]
            func = sy.lambdify((t, s), Phi_dsdt[j] * Phi_dsdt[i], "numpy")
            Ic[i, j] = integrate.dblquad(func, 0.0, Tmax, gfun, hfun, epsabs=5e-03)[0]
            func = sy.lambdify((t, s), Phi_dtdt[j] * Phi_dtdt[i], "numpy")
            It[i, j] = integrate.dblquad(func, 0.0, Tmax, gfun, hfun, epsabs=5e-03)[0]
    print(str(time.perf_counter() - deb) + " seconds of computation")
    I_pen = Is + It + 2 * Ic
    return (Is, Ic, It, I_pen)
Example #51
def randomWeightMatrix(x, y):
    mat = ones(x, y)
    f = lambda x: random.uniform(0, 1) * x
    mat = mat.applyfunc(f)
    return mat
Example #52
def _look_for_case_tree(robo,symo):
    try_paul_str =  ("\r\n\r\n# Branch {0}") + \
        ("of the robot cannot be solved by PIEPER METHOD. ") + \
        ("This branch has less than 6 joints. ") + \
        ("Try Paul Method \r\n\r\n")
    End_joints = []
    X_joints = []
    pieper_joints = []
    j = []
    for i in range(robo.NJ+1):
        j.append(i)
    for joint in range(1, len(robo.ant)):
        if j[joint] not in robo.ant and robo.sigma[joint] != 2:
            End_joints.append(j[joint])
            branches = len(End_joints)
    symo.write_line("# The tree structure robot has {0} branches.".format(branches))
    [bool_fail, bool_prism, bool_spherical] = [zeros(1, branches), zeros(1, branches), zeros(1, branches)]
    [pieper_branches, com_key] = [999*ones(1, branches), 999*ones(1, branches)]
    num_prism = zeros(1, branches)
    for i in range(branches):
        f_joint = End_joints[i]
        c = 0
        globals()["bran"+str(i)] = [0]*max(robo.ant)
        while f_joint != 0:
            globals()["bran"+str(i)][c] = f_joint
            f_joint = robo.ant[f_joint]
            c += 1
        globals()["bran"+str(i)] = [x for x in globals()["bran"+str(i)] if x != 0]
        globals()["bran"+str(i)] = globals()["bran"+str(i)][::-1]
        if len(globals()["bran"+str(i)]) > 6:
            symo.write_line(
                ("\r\n\r\n# Branch {0} ".format(i+1)) + \
                ("of the robot cannot be solved by PIEPER METHOD. ") + \
                ("This branch is redundant. \r\n\r\n")
            )
            bool_fail[i] = 1
        elif len(globals()["bran"+str(i)]) < 6:
            symo.write_line(try_paul_str.format(i+1))
            bool_fail[i] = 1
        else:
            bool_fail[i] = 0
            for k in range(len(globals()["bran"+str(i)])):
                num_prism[i] = num_prism[i] + robo.sigma[globals()["bran"+str(i)][k]]
            if num_prism[i] > 3:
                symo.write_line(
                    ("\r\n\r\n# Branch {0} ".format(i+1)) + \
                    ("of the robot cannot be solved by PIEPER METHOD. ") + \
                    ("It is redundant (more than 3 prismatic joints). \r\n\r\n")
                )
                bool_fail[i] = 1
            elif num_prism[i] == 3:
                pieper_branches[i] = i
                bool_prism[i] = 1
                p_joints = []
                joints = []
                for joint in range(len(globals()["bran"+str(i)])):
                    if robo.sigma[globals()["bran"+str(i)][joint]] == 1:
                        pieper_joints.append(globals()["bran"+str(i)][joint])
                        p_joints.append(globals()["bran"+str(i)][joint])
                    else:
                        X_joints.append(globals()["bran"+str(i)][joint])
                        joints.append(robo.sigma[globals()["bran"+str(i)][joint]])
                        joint_type = tuple(joints)
                    if joint_type in joint_com:
                        com_key[i] = joint_com[joint_type]
                symo.write_line(
                    ("\r\n\r\n# PIEPER METHOD: Branch {0}".format(i+1)) + \
                    (" is decoupled with 3 prismatic joints ") + \
                    ("positioned at {0} \r\n\r\n".format(p_joints))
                )
            else:
                for m in range(2, len(globals()["bran"+str(i)])):
                    if (robo.sigma[globals()["bran"+str(i)][m-1]] == 0) \
                        and (robo.sigma[globals()["bran"+str(i)][m]] == 0) \
                        and (robo.sigma[globals()["bran"+str(i)][m+1]] == 0):
                        if (robo.d[globals()["bran"+str(i)][m]] == 0) \
                            and (robo.d[globals()["bran"+str(i)][m+1]] == 0) \
                            and (robo.r[globals()["bran"+str(i)][m]] == 0):
                            if (sin(robo.alpha[globals()["bran"+str(i)][m]]) != 0) \
                                and (sin(robo.alpha[globals()["bran"+str(i)][m+1]]) != 0):
                                pieper_branches[i] = i
                                pieper_joints = [
                                    globals()["bran"+str(i)][m-1],
                                    globals()["bran"+str(i)][m],
                                    globals()["bran"+str(i)][m+1]
                                ]
                                joints = []
                                joint = globals()["bran"+str(i)]
                                for ji in range(len(joint)):
                                    if joint[ji] not in pieper_joints:
                                        X_joints.append(globals()["bran"+str(i)][ji])
                                        joints.append(robo.sigma[globals()["bran"+str(i)][ji]])
                                        joint_type = tuple(joints)
                                    if joint_type in joint_com:
                                        com_key[i] = joint_com[joint_type]
                                symo.write_line(
                                    ("\r\n\r\n# PIEPER METHOD: ") + \
                                    ("Branch{0}".format(i+1)) + \
                                    (" is decoupled with a ") + \
                                    ("spherical joint composed by ") + \
                                    ("joints {0} \r\n\r\n".format(pieper_joints))
                                )
                                bool_spherical[i] = 1
                                break
                            elif m == 5:
                                bool_fail[i] = 1
                                symo.write_line(try_paul_str.format(i+1))
                        elif m == 5:
                            bool_fail[i] = 1
                            symo.write_line(try_paul_str.format(i+1))
                    elif m == 5:
                        bool_fail[i] = 1
                        symo.write_line(try_paul_str.format(i+1))
    bools = [bool_fail, bool_prism, bool_spherical]
    pieper_branches = [x for x in pieper_branches if x != 999]
    com_key = [x for x in com_key if x != 999]
    return bools, pieper_branches, pieper_joints, X_joints, com_key
Example #53
T = Matrix([[0, 1, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0],
            [1, 0, 0, 0, 0, 0],
            [0, 0, 0, 1, 0, 0],
            [0, 0, 0, 0, 1, 0],
            [0, 0, 1, 0, 0, 0],
            [0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 1]])

F = Matrix(len(p), len(fs), lambda i, j: fs[j].subs({x: x-p[i][0], y: y-p[i][1]}))
# P stands for scalar potential
#pprint((Nu*G).multiply_elementwise(F), num_columns=200)
P = [Matrix(1, len(p), lambda i, j: 0 if j == k else 1)*((Nu*G).multiply_elementwise(F)) for k in range(0, len(p))]

#pprint(P)
sensor_functions = T * (ones(1, len(p)) * (Nu * G).multiply_elementwise(F)).T

# The only thing left is to substitute

for sensor_index, sensor_function in enumerate(sensor_functions):

    # unpack coordinates expressions

    s_x, s_y = absolute_sensor_ps[sensor_index]
    print(sensor_function.subs({x: s_x, y: s_y}))

pprint(sensor_functions)

# sensor_functions are functions which depend on the absolute Sensor position.
# Let us find an expression for every Sensor value. We need to call this function
# with the sum of the Sensor's relative position and the associated Agent's position.
Example #54
def test_empty_Matrix():
    sT(ones(0, 3), "MutableDenseMatrix(0, 3, [])")
    sT(ones(4, 0), "MutableDenseMatrix(4, 0, [])")
    sT(ones(0, 0), "MutableDenseMatrix([])")
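For reference, the same behaviour can be checked interactively with srepr (a minimal sketch assuming only sympy):

import sympy

print(sympy.srepr(sympy.ones(0, 3)))  # MutableDenseMatrix(0, 3, [])
print(sympy.srepr(sympy.ones(0, 0)))  # MutableDenseMatrix([])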
Example #55
#drawGraph(edges11, edges2, filename = "graph_dead_end.pdf", labels = list('ABCDE'))
#drawGraph(edges1, edges2, filename = "graphx.pdf", labels=['']*5, node_size=[100,200,300,400,500])#_probability


M=get_stochastic_matrix(G)

# rand surfer
drawGraph(edges1, edges2, filename = "graph.pdf", labels = list('ABCDE'))
v=Matrix([1,0,0,0,0]).T
v_ = ([1,0,0,0,0], [0,0,0,0,1], [0, '\\frac{2}{5}','\\frac{2}{5}', '\\frac{1}{5}', 0])
for i in range(3):
    v= v_[i]
    drawGraph(edges1, edges2, filename = "graph_probability%d.pdf" %i, labels=v)#
    #v = M*v

pr = sympy.ones(len(G.nodes()),1)/len(G.nodes())
node_size = list(map(int, 5000 * pr))
drawGraph(edges1, edges2, filename = "graph_tansmatr0.pdf", # labels=['']*5, 
          node_size=node_size)
for i in range(1,4):
    prp = M*pr
    # print r"""\only<%i>{{ \begin{{equation*}}
    # {2} = 
    # {0}
    # \cdot
    # {1}
    # \end{{equation*}} }}""".format(*map(sympy.latex, (M, pr, prp))) % i
    # print "\n"*2
    pr = prp
    node_size = list(map(int, 5000 * pr))
    drawGraph(edges1, edges2, filename = "graph_tansmatr%d.pdf" % i, # labels=['']*5, 
              node_size=node_size)
Example #56
def get_stochastic_matrix(G):
    A = nx.adjacency_matrix(G.reverse(copy=True), nodelist=list('ABCDE'))
    k = Matrix((A.sum(axis=0)).astype(int)).applyfunc(lambda x: 1/x)
    A = Matrix(A.astype(int))
    S = A.multiply_elementwise(sympy.ones(5,1)*k)
    return S
Example #57
def Pre_Comp(L, T, Xdata, Y, Kbeta, J):
    t = sy.Symbol("t")
    s = sy.Symbol("s")
    # Retrieve the variables and parameters
    N = len(L)
    D = len(L[0])
    # ----------------- INFERENCE OF THE LONGITUDINAL PARAMETERS
    # print("[   ] Inferring the functional parameters")
    model = pyGPs.GPR()
    kern1 = pyGPs.cov.RBF(log_ell=0.0, log_sigma=0.0)
    kern2 = pyGPs.cov.Periodic(log_ell=0.0, log_p=0.0, log_sigma=0.0)
    kern = pyGPs.cov.SumOfKernel(kern1, kern2)
    m = pyGPs.mean.Const()
    model.setPrior(mean=m, kernel=kern)
    model.setNoise(log_sigma=-2.30258)
    Theta = np.zeros((N, D, len(model.covfunc.hyp)), float)
    Gamma = np.zeros((N, D), float)
    moy_est = np.zeros((N, D), float)
    for i in range(N):
        for j in range(D):
            y = np.asarray(Xdata[i][j])
            x = np.asarray(L[i][j])
            try:
                model.optimize(x, y)
                moy_est[i, j] = model.meanfunc.hyp[0]
                Theta[i, j, :] = np.array(np.exp(model.covfunc.hyp))
                Gamma[i, j] = np.exp(model.likfunc.hyp)
            except:
                # Inference problem, default parameters assigned
                moy_est[i, j] = np.mean(x)
                Theta[i, j, :] = np.array([0.05, np.std(x) ** 2, 0.05, 1.0, 0.0])
                Gamma[i, j] = 1.0
                pass

    # ----------------- RETRIEVE THE QUANTITIES OF INTEREST
    # print("[-  ]  Retrieving the quantities of interest")
    # Build the kernel form
    el1 = sy.Symbol("el1")
    sig1 = sy.Symbol("sig1")
    args1 = [el1, sig1]
    el2 = sy.Symbol("el2")
    per2 = sy.Symbol("per2")
    sig2 = sy.Symbol("sig2")
    args2 = [el2, per2, sig2]
    syk = cg.sy_RBF((s, t), *args1) + cg.sy_Periodic((s, t), *args2)
    args = [t, s] + args1 + args2
    k_fonc = sy.lambdify(tuple(args), syk, "numpy")
    Psi = Comp_Psi(L, k_fonc, Theta, Gamma)[0]
    # ----------------- Build the functional basis
    # print("[-- ]  Computing the quantities of interest")
    syPhi = sy.ones(Kbeta ** 2, 1)
    syb = sy.ones(1, Kbeta ** 2)
    v = [np.arange(Kbeta), np.arange(Kbeta)]
    expo = cg.expandnp(v)
    Phi_fonc = [[] for j in range(Kbeta ** 2)]
    for x in range(len(expo[:, 0])):
        syPhi[x] = (t ** expo[x, 0]) * (s ** expo[x, 1])
        Phi_fonc[x] = sy.lambdify((t, s), syPhi[x], "numpy")
        syb[x] = sy.Symbol("b" + str(x))
    syBeta = syb * syPhi
    I_pen = J22_fast(syPhi, np.max(T), 50)[3]
    # ----------------- Build l and V
    Un = np.ones(J + 1, float)
    Un[0] = 0.5
    Un[J] = 0.5
    Phi_mat = Comp_Phi(Phi_fonc, T, J)
    l = np.zeros((N, D * Kbeta ** 2), float)
    V = [[] for i in range(N)]
    vl = [[] for j in range(D)]
    for i in range(N):
        Phi_i = Phi_mat[(i * (J + 1)) : (i * (J + 1) + J + 1), :].T
        for j in range(D):
            Xij = Xdata[i][j]
            # Mean of F_ij estimated upstream
            Etaij = moy_est[i, j]
            t = L[i][j]
            grid = T[i] * 1.0 * np.arange(J + 1) / J
            vec = cg.expandnp([t, grid])
            args = list(vec.T) + list(Theta[i, j, :])
            K_ij = np.apply_along_axis(k_fonc, 0, *args).reshape(J + 1, len(t))
            K_ij[0, :] = K_ij[0, :] * 0.5
            K_ij[J, :] = K_ij[J, :] * 0.5
            KPsi = K_ij.dot(Psi[i][j])
            l[i, (j * Kbeta ** 2) : ((j + 1) * Kbeta ** 2)] = (
                Phi_i.dot(KPsi.dot(Xij - Etaij) + Etaij * Un).reshape(-1) / J
            )
            # compute k_ij, the matrix of k(s,t) for s, t on the grid over [0, Ti]
            vec = cg.expandnp([grid, grid])
            args = list(vec.T) + list(Theta[i, j, :])
            k_ij = np.apply_along_axis(k_fonc, 0, *args).reshape(J + 1, J + 1)
            k_ij[0, :] = k_ij[0, :] * 0.5
            k_ij[J, :] = k_ij[J, :] * 0.5
            k_ij[:, 0] = k_ij[:, 0] * 0.5
            k_ij[:, J] = k_ij[:, J] * 0.5
            Cov_FF = k_ij - KPsi.dot(K_ij.T)
            vl[j] = Phi_i.dot(Cov_FF).dot(Phi_i.T) / J ** 2
        V[i] = sc.sparse.block_diag(tuple(vl))
        # Add a diagonal matrix to make V[i] positive definite, keeping its eigenvalues small relative to those of V[i] so as not to distort the likelihood too much around 0.
        # V[i]=V[i]+np.eye(D*Kbeta**2)*0.01*np.trace(V[i].toarray())/(D*Kbeta**2)
    return (l, V, I_pen)
Example #58
#       x**3,
#       x**4,
#       x**5,
#       x**6]

F = Matrix(number_of_matters, len(fs), lambda i, j: fs[j].subs({x: x-ps[i][0], y: y-ps[i][1]}))

P = [Matrix(1, number_of_matters, lambda i, j: 0 if j == k else 1)*((Nu*G).multiply_elementwise(F)) for k in range(number_of_matters)]

R = ReferenceFrame('R')
M = [(P[i]*E*Nu[i, :].T)[0].subs({x: R[0], y: R[1]}) for i in range(0, number_of_matters)]
W = [gradient(M[i], R).to_matrix(R).subs({R[0]: x, R[1]: y})[:2] for i in range(number_of_matters)]

# Natural laws part

natural_field = diag(*(ones(1, number_of_matters) * ((Nu*G).multiply_elementwise(F))))

force_is_present = natural_field.applyfunc(
    lambda exp: Piecewise((1.0, exp > 0.0),
                          (0.0, True))
)

natural_influence = (Upsilon * Alpha * natural_field + S * Alpha)*force_is_present*ones(len(fs), 1)
pending_transformation_vector = Omicron.transpose()*natural_influence

Nu_new = (Nu -
          get_matrix_of_converting_atoms(Nu, ps, pending_transformation_vector) +
          get_matrix_of_converted_atoms(Nu, ps, pending_transformation_vector, natural_influence, Omicron, D))

delta_t = Symbol('Delta_t')
Example #59
def J22_fast(syPhi, Tmax, J):
    # size of the functional basis
    Kbeta = len(syPhi)
    # symbol for lowercase t (i.e. the time point within the follow-up period)
    t = sy.Symbol("t")
    # symbol for uppercase T (i.e. the follow-up duration)
    s = sy.Symbol("s")

    deb = time.perf_counter()
    Phi = sy.ones(Kbeta, 1)
    for i in range(Kbeta):
        Phi[i] = syPhi[i]
    # differentiate the functional basis
    (Phi_dsds, Phi_dsdt, Phi_dtdt) = (Phi.diff(s, s), Phi.diff(s, t), Phi.diff(t, t))

    (Phi_mat_dsds, Phi_mat_dsdt, Phi_mat_dtdt) = (
        np.zeros(((J + 1) ** 2, Kbeta), float),
        np.zeros(((J + 1) ** 2, Kbeta), float),
        np.zeros(((J + 1) ** 2, Kbeta), float),
    )
    # Square integration grid (the upper triangle is removed further down)
    t_arg = np.linspace(0, Tmax, J + 1)
    s_arg = t_arg
    args = cg.expandnp([t_arg, s_arg]).T
    for i in range(Kbeta):
        func = sy.lambdify((t, s), Phi_dsds[i], "numpy")
        Phi_mat_dsds[:, i] = np.apply_along_axis(func, 0, *args)
        func = sy.lambdify((t, s), Phi_dsdt[i], "numpy")
        Phi_mat_dsdt[:, i] = np.apply_along_axis(func, 0, *args)
        func = sy.lambdify((t, s), Phi_dtdt[i], "numpy")
        Phi_mat_dtdt[:, i] = np.apply_along_axis(func, 0, *args)

    (Is, Ic, It) = (np.zeros((Kbeta, Kbeta), float), np.zeros((Kbeta, Kbeta), float), np.zeros((Kbeta, Kbeta), float))
    Un = np.ones(J + 1, float)
    # lower-triangular integration matrix
    a = cg.expandnp([np.arange(J + 1), np.arange(J + 1)])
    triang = np.asarray(a[:, 0] <= a[:, 1], float).reshape((J + 1, J + 1))
    # Compute the integrals
    for i in range(Kbeta):
        for j in range(Kbeta):
            if i <= j:
                Is[i, j] = (
                    Un.dot(
                        Phi_mat_dsds[:, j].reshape((J + 1, J + 1)) * Phi_mat_dsds[:, i].reshape((J + 1, J + 1)) * triang
                    ).dot(Un)
                    * (Tmax ** 2)
                    / (J * (J + 1))
                )
                Ic[i, j] = (
                    Un.dot(
                        Phi_mat_dsdt[:, j].reshape((J + 1, J + 1)) * Phi_mat_dsdt[:, i].reshape((J + 1, J + 1)) * triang
                    ).dot(Un)
                    * (Tmax ** 2)
                    / (J * (J + 1))
                )
                It[i, j] = (
                    Un.dot(
                        Phi_mat_dtdt[:, j].reshape((J + 1, J + 1)) * Phi_mat_dtdt[:, i].reshape((J + 1, J + 1)) * triang
                    ).dot(Un)
                    * (Tmax ** 2)
                    / (J * (J + 1))
                )
            else:
                (Is[i, j], Ic[i, j], It[i, j]) = (Is[j, i], Ic[j, i], It[j, i])
    I_pen = Is + It + 2 * Ic
    I_pen = I_pen + np.eye(Kbeta) * 0.001 * np.trace(I_pen) / Kbeta
    return (Is, Ic, It, I_pen)