Example No. 1
def numerical_coeffs(Mn, n, dict_ss):
    """Numerical coefficients of polynomial of order (n - 3)! obtianed from determinant of elimination theory matrix."""
    Mn = Mn.tolist()
    Mn = [[_zs_sub(_ss_sub(str(entry))) for entry in line] for line in Mn]
    zs = punctures(n)

    values = [
        mpmath.e**(2 * mpmath.pi * 1j * j / (math.factorial(n - 3) + 1))
        for j in range(math.factorial(n - 3) + 1)
    ]

    A = [[
        value**exponent for exponent in range(math.factorial(n - 3) + 1)[::-1]
    ] for value in values]
    b = []

    for i, value in enumerate(values):
        dict_zs = {
            str(zs[-2]): value,
            str(zs[-3]): 1
        }  # noqa --- used in eval function
        nMn = mpmath.matrix([[eval(entry, None) for entry in line]
                             for line in Mn])
        b += [mpmath.det(nMn)]

    return mpmath.lu_solve(A, b).T.tolist()[0]
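The function above recovers the polynomial coefficients by sampling the determinant at roots of unity and solving a Vandermonde system with mpmath.lu_solve. A minimal standalone sketch of that interpolation idea (not taken from the original repository; the degree and coefficients below are made up):

import mpmath

d = 3  # stands in for (n - 3)! in the function above
true_coeffs = [1, -2, 0.5, 4]  # highest power first

# sample points: the (d + 1)-th roots of unity
values = [mpmath.e**(2j * mpmath.pi * j / (d + 1)) for j in range(d + 1)]
# Vandermonde matrix with descending exponents, as in the function above
A = [[value**exponent for exponent in range(d, -1, -1)] for value in values]
b = [mpmath.polyval(true_coeffs, value) for value in values]

recovered = mpmath.lu_solve(mpmath.matrix(A), mpmath.matrix(b)).T.tolist()[0]
print(recovered)  # matches true_coeffs up to rounding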
Example No. 2
def wronsk(e0, e1, st, u, d):
    e = e0
    detar = []
    enar = []
    while e <= e1:
        dar = np.zeros((2 * N + 2, 2 * N + 2))
        shootRq(e, u, d, dar)
        shootLq(e, u, d, dar)
        M = mp.matrix(2 * N + 2, 2 * N + 2)
        for i in range(2 * N + 2):
            for j in range(2 * N + 2):
                M[i, j] = mp.mpf(str(dar[i, j]))
        det = mp.det(M)
        #det = np.linalg.det(dar)
        print("det=%s" % det)
        print(dar)
        detar.append(det)
        enar.append(e)
        e += st

    print("Plotting")
    plt.plot(enar,
             detar,
             'b',
             label='Wronskian d=%s, h = %s, u = %s, lambda = %s' %
             (d, h, u, lam))
    plt.legend(loc='best')
    plt.xlabel('e')
    plt.grid()
    plt.show()
Example No. 3
def adj(A):
    d = A.rows
    M = mpmath.matrix(d)
    for i in range(0, d):
        for j in range(0, d):
            M[i, j] = ((-1)**(i + j)) * mpmath.det(minor(j, i, A))

    return M
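The minor helper used by adj is not part of this snippet; a plausible (hypothetical) implementation, together with a quick check of the adjugate identity A * adj(A) == det(A) * I (assuming adj from above is in scope), could look like this:

import mpmath

def minor(i, j, A):
    # hypothetical helper: copy of A with row i and column j removed
    d = A.rows
    M = mpmath.matrix(d - 1)
    for r, row in enumerate(k for k in range(d) if k != i):
        for c, col in enumerate(k for k in range(d) if k != j):
            M[r, c] = A[row, col]
    return M

A = mpmath.matrix([[2, 1], [5, 3]])
print(A * adj(A))                     # det(A) times the identity
print(mpmath.det(A) * mpmath.eye(2))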
Example No. 4
    def polish(self, init_shapes, flag_initial_error=True):
        """Use Newton's method to compute precise shapes from rough ones."""
        precision = self._precision
        manifold = self.manifold
        #working_prec = precision + 32
        working_prec = precision + 64
        mpmath.mp.prec = working_prec
        target_epsilon = mpmath.mpmathify(2.0)**-precision
        det_epsilon = mpmath.mpmathify(2.0)**(32 - precision)
        #shapes = [mpmath.mpmathify(z) for z in init_shapes]
        shapes = mpmath.matrix(init_shapes)
        init_equations = manifold.gluing_equations('rect')
        target = mpmath.mpmathify(self.target_holonomy)
        error = self._gluing_equation_error(init_equations, shapes, target)
        if flag_initial_error and error > 0.000001:
            raise GoodShapesNotFound(
                'Initial solution not very good: error=%s' % error)

        # Now begin the actual computation

        eqns = enough_gluing_equations(manifold)
        assert eqns[-1] == manifold.gluing_equations('rect')[-1]
        for i in range(100):
            errors = self._gluing_equation_errors(eqns, shapes, target)
            if infinity_norm(errors) < target_epsilon:
                break
            derivative = [[
                int(eqn[0][i]) / z - int(eqn[1][i]) / (1 - z)
                for i, z in enumerate(shapes)
            ] for eqn in eqns]
            derivative[-1] = [target * x for x in derivative[-1]]
            derivative = mpmath.matrix(derivative)
            #det = derivative.matdet().abs()
            det = abs(mpmath.det(derivative))
            if min(det, 1 / det) < det_epsilon:
                raise GoodShapesNotFound(
                    'Gluing system is too singular (|det| = %s).' % det)
            delta = mpmath.lu_solve(derivative, errors)
            shapes = shapes - delta

        # Check to make sure things worked out ok.
        error = self._gluing_equation_error(init_equations, shapes, target)
        #total_change = infinity_norm(init_shapes - shapes)
        if error > 1000 * target_epsilon:
            raise GoodShapesNotFound('Failed to find solution')
        #if flag_initial_error and total_change > pari(0.0000001):
        #    raise GoodShapesNotFound('Moved too far finding a good solution')
        self.shapelist = [Number(z, precision=precision) for z in shapes]
Example No. 5
    def pdfx(self, datum):
        size = len(datum)

        datum = mp.matrix(datum)
        sigma = mp.matrix(self.sigma)
        mu = mp.matrix(self.mu)
        det = mp.det(sigma)

        N = ((2 * math.pi)**size * det)**0.5

        x_mu = (datum - mu)

        inv = sigma**-1

        #print N,np.array(x_mu.T.tolist()).shape,np.array(inv.tolist()).shape
        result = -0.5 * x_mu.T * (inv * x_mu)
        #print result.tolist()[0][0]
        result = mp.power(math.e, result.tolist()[0][0])
        return result / N
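For reference, a self-contained sketch of the same multivariate normal density with concrete, made-up parameters; mp.exp is used in place of mp.power(math.e, ...), which is equivalent:

import math
import mpmath as mp

mu = mp.matrix([0, 0])
sigma = mp.matrix([[1, 0.2], [0.2, 2]])
x = mp.matrix([0.5, -0.3])

det = mp.det(sigma)
norm_const = ((2 * math.pi)**2 * det)**0.5  # sqrt((2*pi)^k * det(sigma)), k = 2
x_mu = x - mu
quad = (x_mu.T * (sigma**-1 * x_mu))[0, 0]
print(mp.exp(-0.5 * quad) / norm_const)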
Example No. 6
    def get_BCMatrix_3sec_axt_det(self):

        mp.mp.dps = self.prec

        # use symbolic mode to create a symbolic BCMatrix that is then transformed with sympy lambdify into an mpmath matrix
        if self.mode == "symbolic":
            matrix_start = time.time()
            mat = self.BCMatrix_3sec_axt_lambda(self.tau11, self.tau21,
                                                self.tau12, self.tau22,
                                                self.tau13, self.tau23,
                                                self.E_1, self.E_2, self.E_3,
                                                self.I_1, self.I_2, self.I_3,
                                                self.parameters['L1'], self.parameters['L2'], self.parameters['L3'])
            self.matrix_time = time.time() - matrix_start
        else:
            matrix_start = time.time()
            self.get_BC_Matrix()
            mat = self.BCM
            self.matrix_time = time.time() - matrix_start
        det_start = time.time()
        det = mp.det(mat)
        self.det_time = time.time() - det_start
        self.kappa = 0
        # calculate condition number kappa from stretching factors in sigma by using SVD
        # sigma = mp.svd_r(mat, compute_uv=False)
        # zeros in sigma represent a matrix without full rank, meaning the BCMatrix is singular due to low precision
        # if min(sigma) == 0:
        #     kappa = max(sigma) / min(sigma)
        #     kappa_float = np.float(mp.log10(kappa))
        #     self.kappa = kappa_float

        if det == 0:
            warnings.warn("BCMatrix is singular, a much higher precision is needed!")
            self.kappa = np.inf
            return -np.inf
        else:
            # check legitimacy of the log transform around the zero transition
            # (the log transform predicts a +-1 transition?)
            return np.float64(mp.log(mp.fabs(det)) * mp.sign(det))
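A small standalone illustration (not from the source) of the sign-preserving log transform returned above; it keeps the zero crossing of the determinant while compressing its huge dynamic range:

import mpmath as mp
import numpy as np

for det in [mp.mpf('1e80'), mp.mpf('-1e80'), mp.mpf('2.5')]:
    print(np.float64(mp.log(mp.fabs(det)) * mp.sign(det)))
# prints roughly 184.2, -184.2 and 0.92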
Example No. 7
def get_cp_log(Sigma, diagonal=False):
    D = Sigma.shape[0]

    if diagonal == False:
        try:
            # L = cholesky(Sigma)
            # log_det = np.sum(2*np.log(np.diag(L)))
            det_K = mpm.det(Sigma + 1e-100 * np.eye(D))
            # print(det_K)
            log_det = float(mpm.log(det_K))
            # print(log_det)
            # det_K = np.linalg.det(K[:N_det,:N_det] + 1e-10*np.eye(N_det))  # Determinant! "Noisiness of the kernel"
        except RuntimeError:
            return None
    else:
        log_det = np.sum(np.log(np.diagonal(Sigma + 1e-100 * np.eye(D))))
        # print("Gaussian fit")
    cp_log = -(1 / 2.0) * (D * np.log(2 * np.pi) + log_det)

    # print(cp_log)
    return cp_log
Example No. 8
def logpdf(x, nu, loc, scale, scale_inv=None):
    """
    Natural logarithm of the PDF for the multivariate t distribution.

    `loc` must be a sequence.  `scale` is the scale matrix; it
    must be an instance of `mpmath.matrix`.  `scale` must be
    positive definite.

    If given, `scale_inv` must be the inverse of `scale`.
    """

    p = mpmath.mpf(len(loc))
    with mpmath.extradps(5):
        nu = mpmath.mpf(nu)
        if scale_inv is None:
            with mpmath.extradps(5):
                scale_inv = mpmath.inverse(scale)
        tmp = mpmath.matrix(scale.cols, 1)
        for k, v in enumerate(loc):
            tmp[k] = mpmath.mpf(v)
        loc = tmp
        tmp = mpmath.matrix(scale.cols, 1)
        for k, v in enumerate(x):
            tmp[k] = mpmath.mpf(v)
        x = tmp
        delta = x - loc
        c = (nu + p) / 2
        t1 = -c * mpmath.log1p((delta.T * scale_inv * delta)[0, 0] / nu)
        t2 = mpmath.loggamma(c)
        t3 = mpmath.loggamma(nu / 2)
        t4 = (p / 2) * mpmath.log(nu)
        t5 = (p / 2) * mpmath.log(mpmath.pi)
        with mpmath.extradps(5):
            det = mpmath.det(scale)
        t6 = mpmath.log(det) / 2
        return t2 - t3 - t4 - t5 - t6 + t1
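A hypothetical usage sketch, assuming the logpdf above is in scope (all numbers are made up):

import mpmath

scale = mpmath.matrix([[2, 0.5], [0.5, 1]])
print(logpdf([0.3, -0.1], nu=5, loc=[0, 0], scale=scale))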
Example No. 9
def Bc(phi,xi):
    matB = [[fgamma(phi+3)*fpolylog(phi+2,xi),fgamma(phi+2)*fpolylog(phi+1,xi),xi*power((1-xi),-2)],
            [fgamma(phi+4)*fpolylog(phi+2,xi),fgamma(phi+3)*fpolylog(phi+1,xi),0],
            [fgamma(phi+3)*fpolylog(phi+1,xi),fgamma(phi+2)*fpolylog(phi,xi),xi*(xi+1)*power((1-xi),-3)]]
    #print(matB)
    return det(matB)
Example No. 10
def B(phi,xi):
    matB = [[fgamma(phi+3)*fpolylog(phi+2,xi),fgamma(phi+2)*fpolylog(phi+1,xi),fgamma(phi+1)*fpolylog(phi,xi)],
            [fgamma(phi+4)*fpolylog(phi+2,xi),fgamma(phi+3)*fpolylog(phi+1,xi),fgamma(phi+2)*fpolylog(phi,xi)],
            [fgamma(phi+3)*fpolylog(phi+1,xi),fgamma(phi+2)*fpolylog(phi,xi),fgamma(phi+1)*fpolylog(phi-1,xi)]]
    #print(matB)
    return det(matB)
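fgamma, fpolylog, power and det are not defined in these snippets; a minimal set of aliases that would make B, Bc (and the similar helper A further below) runnable is an assumption about the source, but would look like:

from mpmath import gamma as fgamma, polylog as fpolylog, power, det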
Example No. 11
    gaussian distribution. The relation between the noises will be given by the
    covariance matrix C. This will tell how big the noises are and how they relate
    to each other.
    
    We will use a basic kernel for now

"""
################  Covariance Matrix "K" #################################
 #We compute the distances among each pair of points in X_grid

K = get_Kernel(tgrid, kernel_type = "1", l = 0.0000001, sigma_noise = 0.03)

############# Compute properties of the Kernel ########################
N_det = 30 # Number of samples to compute the determinant from 

det_K2 = mpm.det(K[:N_det,:N_det]+ 1e-10*np.eye(N_det))
det_K = np.linalg.det(K[:N_det,:N_det]+ 1e-10*np.eye(N_det))   # Determinant ! "Noisiness of the kernel"
                           # The bigger the determinant, the more random ? 
print (["Determinant by mpm:", det_K2])
print (["Determinant by numpy:", det_K])
# Cholesky factorization ! To sample from the Multivariate Gaussian.
# To ensure that it is positive definite we add a small noise to the diagonal
L = np.linalg.cholesky(K+1e-10*np.eye(N))


if (plot_stochastic_process and plot_flag):
    
    ## Plot the covariance matrix ! 
    # Show the Nshow first samples
    
    Nshow = 20
Example No. 12
 ['0.0', '0.49062853002881556', '0.0', '0.49062866702173223', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0'],
 ['0.88203688184375317', '0.47118036787131636', '1.1227920515867501', '0.51055067437658265', '-0.85084783450537795', '-0.52541218344982159', '-1.1569572051744931', '-0.5818504744392446', '0.0', '0.0', '0.0', '0.0'],
 ['-0.2311745312671405', '0.43275245877020069', '0.2504907968164292', '0.55087396761260321', '0.2906577462388459', '-0.4706885789091225', '-0.32187947109667221', '-0.64002830562602743', '0.0', '0.0', '0.0', '0.0'],
 ['157946997.87865532', '84374617.543281037', '-201059316.76444157', '-91424738.551285949', '-88143244.788399123', '-54429867.271798491', '119854592.2077822', '60276604.033337957', '0.0', '0.0', '0.0', '0.0'],
 ['41396594.577003489', '-77493303.391669111', '44855597.608227799', '98645464.576438188', '-30110574.226551734', '48760797.110106846', '-33344995.45515151', '-66303516.870942348', '0.0', '0.0', '0.0', '0.0'],
 ['0.0', '0.0', '0.0', '0.0', '0.44788407496498205', '0.89409163702204616', '1.6770999492103481', '1.3463521974733625', '-0.55597812186530204', '-0.83119692492641679', '-1.5213239822127666', '-1.146484478244564'],
 ['0.0', '0.0', '0.0', '0.0', '-0.49461102793907139', '0.2477692370032844', '0.74480154656609981', '0.92777108267950682', '0.4078089254411196', '-0.2727787286589548', '-0.56249815132223653', '-0.74640515750124306'],
 ['0.0', '0.0', '0.0', '0.0', '46398373.546327205', '92623069.401249781', '-173738604.68242844', '-139474902.67964052', '-99559414.172426251', '-148843049.12926965', '272424764.68263935', '205301939.52756113'],
 ['0.0', '0.0', '0.0', '0.0', '51239033.752749977', '-25667556.080590032', '77157465.496696528', '96112132.99799967', '-73026646.399300336', '48846689.025947514', '-100727016.92738359', '-133659399.1599524'],
 ['0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.098749536123081272', '0.99511231984910943', '2.2934688986468432', '2.0639766445045748'],
 ['0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '-0.48823049470113306', '0.048449339749094783', '1.0126461098572671', '1.1252415885989009']])

# reference mpmath determinant computation
start_1 = time.time()

det_A = mp.det(A)
det_1 = np.float64(mp.log(mp.fabs(det_A)) * mp.sign(det_A))

time_1 = time.time() - start_1

# chebyshev approximation
start_2 = time.time()

sv_vector = mp.matrix(12, 2)
A_abs = mp.matrix(12)
for i in range(12):
    for j in range(12):
        A_abs[i, j] = abs(A[i, j])

for i in range(12):
    sv_vector[i, 0] = A_abs[i, i] - sum(A_abs[i, :])
    sv_vector[i, 1] = A_abs[i, i] - sum(A_abs[:, i])
Example No. 13
def det(mat):
    if mode == mode_python:
        return np.linalg.det(mat)
    else:
        return mpmath.det(mat)
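mode and mode_python are module-level globals assumed by this dispatcher. A quick check that the two backends agree on a small matrix:

import numpy as np
import mpmath

m = [[1.0, 2.0], [3.0, 4.0]]
print(np.linalg.det(np.array(m)))    # -2.0000000000000004
print(mpmath.det(mpmath.matrix(m)))  # -2.0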
Example No. 14
def solve_scattering_equations(n, dict_ss):
    """Solves the scattering equations given multiplicity and mandelstams."""

    if n == 3:
        return [{}]

    Mn = M(n)
    zs = punctures(n)

    num_coeffs = numerical_coeffs(Mn, n, dict_ss)
    roots = mpmath.polyroots(num_coeffs, maxsteps=10000, extraprec=300)
    sols = [{str(zs[-2]): root * zs[-3]} for root in roots]

    if n == 4:
        sols = [{
            str(zs[-2]):
            mpmath.mpc(sympy.simplify(sols[0][str(zs[-2])].subs({zs[1]: 1})))
        }]
    else:
        Mnew = copy.deepcopy(Mn)
        Mnew[:, 0] += Mnew[:, 1] * zs[1]
        Mnew.col_del(1)
        Mnew.row_del(-1)

        # subs
        sol = sols[0]
        Mnew = Mnew.tolist()
        Mnew = [[
            _zs_sub(_ss_sub(str(entry))).replace(
                "dict_zs['z{}']".format(n - 1),
                "dict_zs['z{}'] * mpmath.mpc(sol[str(zs[-2])] / zs[-3])".
                format(n - 2)) for entry in line
        ] for line in Mnew]

        # get scaling
        if n == 5:
            scaling = 0
        elif n == 6:
            scaling = 2
        elif n == 7:
            scaling = 17
        else:  # computing from scratch, should work for any multiplicity in principle
            dict_zs = {str(zs[-3]): 10**-100, str(zs[1]): 1}
            nMn = mpmath.matrix([[eval(entry, None) for entry in line]
                                 for line in Mnew])
            a = mpmath.det(nMn)
            dict_zs = {str(zs[-3]): 10**-101, str(zs[1]): 1}
            nMn = mpmath.matrix([[eval(entry, None) for entry in line]
                                 for line in Mnew])
            b = mpmath.det(nMn)
            scaling = -round(mpmath.log(abs(b) / abs(a)) / mpmath.log(10))
            assert (abs(
                round(mpmath.log(abs(b) / abs(a)) / mpmath.log(10)) -
                mpmath.log(abs(b) / abs(a)) / mpmath.log(10)) < 10**-30)

        # solve the linear equations
        for i in range(1, n - 3):
            Mnew = copy.deepcopy(Mn)
            index = V(n).index(zs[i])
            Mnew[:, 0] += Mnew[:, index] * zs[i]
            Mnew.col_del(index)
            Mnew.row_del(-1)
            Mnew = Mnew.tolist()
            if i == 1:
                Mnew = [[
                    _zs_sub(_ss_sub(str(entry))).replace(
                        "dict_zs['z{}']".format(n - 1),
                        "dict_zs['z{}'] * mpmath.mpc(sol[str(zs[-2])] / zs[-3])"
                        .format(n - 2)) for entry in line
                ] for line in Mnew]
                for sol in sols:
                    A = [[value**exponent for exponent in [1, 0]]
                         for value in [-1, 1]]
                    b = []
                    for value in [-1, 1]:
                        dict_zs = {str(zs[-3]): value, str(zs[1]): 1}
                        nMn = mpmath.matrix(
                            [[eval(entry, None) for entry in line]
                             for line in Mnew])
                        b += [mpmath.det(nMn) / (value**scaling)]
                    coeffs = mpmath.lu_solve(A, b).T.tolist()[0]
                    sol[str(zs[-3])] = -coeffs[1] / coeffs[0]
                    sol[str(zs[-2])] = mpmath.mpc((sympy.simplify(sol[str(
                        zs[-2])].subs({zs[-3]: sol[str(zs[-3])]}))))
            else:
                Mnew = [[_zs_sub(_ss_sub(str(entry))) for entry in line]
                        for line in Mnew]

                for sol in sols:
                    A = [[value**exponent for exponent in [1, 0]]
                         for value in [-1, 1]]
                    b = []
                    for value in [-1, 1]:
                        dict_zs = {
                            str(zs[i]): value,
                            str(zs[-3]): sol[str(zs[-3])],
                            str(zs[-2]): sol[str(zs[-2])]
                        }  # noqa --- used in eval function
                        nMn = mpmath.matrix(
                            [[eval(entry, None) for entry in line]
                             for line in Mnew])
                        b += [mpmath.det(nMn)]
                    coeffs = mpmath.lu_solve(A, b).T.tolist()[0]
                    sol[str(zs[i])] = -coeffs[1] / coeffs[0]

    return sols
Example No. 15
    gaussian distribution. The relation between the noises will be given by the
    covariance matrix C. This will tell how big the noises are and how they relate
    to each other.
    
    We will use a basic kernel for now

"""
################  Covariance Matrix "K" #################################
#We compute the distances among each pair of points in X_grid

K = get_Kernel(tgrid, kernel_type="1", l=0.0000001, sigma_noise=0.03)

############# Compute properties of the Kernel ########################
N_det = 30  # Number of samples to compute the determinant from

det_K2 = mpm.det(K[:N_det, :N_det] + 1e-10 * np.eye(N_det))
det_K = np.linalg.det(
    K[:N_det, :N_det] +
    1e-10 * np.eye(N_det))  # Determinant ! "Noisiness of the kernel"
# The bigger the determinant, the more random ?
print(["Determinant by mpm:", det_K2])
print(["Determinant by numpy:", det_K])
# Cholesky factorization ! To sample from the Multivariate Gaussian.
# To ensure that it is positive definite we add a small noise to the diagonal
L = np.linalg.cholesky(K + 1e-10 * np.eye(N))

if (plot_stochastic_process and plot_flag):

    ## Plot the covariance matrix !
    # Show the Nshow first samples
Example No. 16
    def FindRelativeMotionMultiPoints_mpmath(self, base_frame_ind,
                                             targ_frame_ind, pnt_ids,
                                             pnt_depthes):
        eltype = self.elem_type
        points_num = len(pnt_ids)
        assert points_num == len(pnt_depthes)

        # estimate camera position [R,T] given distances to all 3D points pj in frame1
        x_img_skew = mpmath.matrix(3, 3)
        A = mpmath.matrix(3 * points_num, 12)
        for i, pnt_id in enumerate(pnt_ids):
            pnt_ind = pnt_id
            pnt_life = self.points_life[pnt_ind]
            x1 = pnt_life.points_list_meter[base_frame_ind]

            x_img = pnt_life.points_list_meter[targ_frame_ind]
            skewSymmeticMatWithAdapter(x_img, lambda x: mpmath.mpf(str(x)),
                                       x_img_skew)

            A[3 * i:3 * (i + 1), 0:3] = x_img_skew * mpmath.mpf(str(x1[0]))
            A[3 * i:3 * (i + 1), 3:6] = x_img_skew * mpmath.mpf(str(x1[1]))
            A[3 * i:3 * (i + 1), 6:9] = x_img_skew * mpmath.mpf(str(x1[2]))

            depth = mpmath.mpf(str(pnt_depthes[i]))
            alph = 1 / depth
            A[3 * i:3 * (i + 1), 9:12] = x_img_skew * alph

        #
        u1, dVec1, vt1 = mpmath.svd_r(A)

        # take the last column of V
        cam_R_noisy = mpmath.matrix(3, 3)
        for col in range(0, 3):
            for row in range(0, 3):
                cam_R_noisy[row, col] = vt1[11, col * 3 + row]
        cam_Tvec_noisy = mpmath.matrix(3, 1)
        for row in range(0, 3):
            cam_Tvec_noisy[row, 0] = vt1[11, 9 + row]

        # project noisy [R,T] onto SO(3) (see MASKS, formula 8.41 and 8.42)
        u2, dVec2, vt2 = mpmath.svd(cam_R_noisy)

        no_guts = u2 * vt2
        no_guts_det = mpmath.det(no_guts)
        sign1 = mpmath.sign(no_guts_det)
        frame_R = no_guts * sign1

        det_den = dVec2[0] * dVec2[1] * dVec2[2]
        if math.isclose(0, det_den):
            return False, None

        t_factor = sign1 / rootCube(det_den)
        cam_Tvec = cam_Tvec_noisy * t_factor

        if sum(1 for a in cam_Tvec if not math.isfinite(a)) > 0:
            print("error: nan")
            return False, None

        # convert results into default precision type
        rel_R = np.zeros((3, 3), dtype=eltype)
        rel_T = np.zeros(3, dtype=eltype)
        for row in range(0, 3):
            for col in range(0, 3):
                rel_R[row, col] = float(frame_R[row, col])
            rel_T[row] = cam_Tvec[row, 0]

        p_err = [""]
        assert IsSpecialOrthogonal(rel_R, p_err), p_err[0]

        return True, (rel_R, rel_T)
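The projection of the noisy rotation onto SO(3) is the key numerical step here. A standalone sketch of just that step with a made-up near-rotation matrix, following the same sign(det) convention as above:

import mpmath

noisy = mpmath.matrix([[0.98, -0.21, 0.02],
                       [0.19, 0.97, -0.05],
                       [0.01, 0.06, 1.03]])
U, S, Vt = mpmath.svd(noisy)
no_guts = U * Vt
R = mpmath.sign(mpmath.det(no_guts)) * no_guts
print(mpmath.det(R))  # close to +1 for a proper rotation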
Example No. 17
    def detJ(self, sol, oParticles):
        """Numerical determinant of reduced Jacobian matrix Phi."""
        return mpmath.det(
            mpmath.matrix([[eval(entry, None) for entry in line]
                           for line in self.sPhi])) if self.sPhi != [] else 1
Example No. 18
def gram_matrix(N,weight,prec=501,tol=1E-40,sv_min=1E-1,sv_max=1E15,bl=None,set_dim=None,force_prec=False):
    r""" Computes a matrix of p_{r,D}(r',D')
    for a basis of P_{r,D}, i.e. dim linearly independent P's
    INPUT: N      = Integer
           weight = Real
    OPTIONAL: 
           tol    = error bound for the Poincaré series
           sv_min = minimal allowed singular value when determining whether a given set is linearly independent or not.
           sv_max = maximally allowed singular value
           bl     = list of pairs (D_i,r_i) from which we compute a matrix of coefficients p_{D_i,r_i}(D_j,r_j)
        """
    # If we have supplied a list of D's and r's we make a gram matrix relative to these
    # otherwise we find a basis, i.e. linearly independent forms with correct dimension
    # find the dimension
    wt='%.4f'% weight
    if(N<10):
        stN="0"+str(N)
    else:
        stN=str(N)
    v=dict()
    filename_work="__N"+stN+"-"+wt+"--finding basis.txt"
    fp=open(filename_work,"write")
    fp.write("starting to find basis")
    fp.close()
    if(silent>0):
        print "Forcing precision:",force_prec
    set_silence_level(0)
    if(bl<>None): 
        dim=len(bl)
        l=bl
    else:
        if(set_dim<>None and set_dim >0):
            dim=set_dim
        else:
            dim=dimension_jac_cusp_forms(int(weight+0.5),N,-1)
        l=list_of_basis(N,weight,prec,tol,sv_min,sv_max,set_dim=dim)
    j=0
    for [D,r] in l.values():
        for [Dp,rp] in l.values():
            # Recall that the gram matrix is symmetric. We need only compute the upper diagonal
            if(v.values().count([Dp,rp,D,r])==0):
                v[j]=[D,r,Dp,rp]
                j=j+1
    # now v is a list we can get into computing coefficients
    # first we print the "gram data" (list of indices) to the file 
    s=str(N)+": (AI["+str(N)+"],["
    indices=dict()
    for j in range(len(l)):
        Delta=l[j][0]
        r=l[j][1]
        diff=(r*r-Delta) % (4*N)
        if(diff<>0):
            raise ValueError, "ERROR r^2=%s not congruent to Delta=%s mod %s!" %(r*r, Delta, 4*N)
        s=s+"("+str(Delta)+","+str(r)+")"
        indices[j]=[Delta,r]
        if(j<len(l)-1):
            s=s+","
        else:
            s=s+"]),"
    s=s+"\n"
    if(silent>0):
        print s+"\n"
    filename2="PS_Gramdata"+stN+"-"+wt+".txt"
    fp=open(filename2,"write")
    fp.write(s)
    fp.close()
    try:
        os.remove(filename_work)
    except os.error:
        print "Could not remove file:",filename_work
        pass
    filename_work="__N"+stN+"-"+wt+"--computing_gram_matrix.txt"
    fp=open(filename_work,"write")
    fp.write("")
    fp.close()
    #print "tol=",tol
    #set_silence_level(2)
    #print "force_prec(gram_mat)=",force_prec
    res=ps_coefficients_holomorphic_vec(N,weight,v,tol,prec,force_prec=force_prec)
    set_silence_level(0)

    res['indices']=indices
    maxerr=0.0
    for j in res['errs'].keys():
        tmperr=abs(res['errs'][j])
        #print "err(",j,")=",tmperr
        if(tmperr>maxerr):
            maxerr=tmperr
        # switch format for easier viewing
        res['errs'][j]=RR(tmperr)
    if(silent>0):
        print "maxerr=",RR(maxerr)
    res['maxerr']=maxerr
    wt_phalf='%.4f'% (weight+0.5)
    filename3="PS_Gramerr"+stN+"-"+wt+".txt"
    fp=open(filename3,"write")
    wt
    s="MAXERR["+wt_phalf+"]["+stN+"]="+str(RR(maxerr))
    fp.write(s)
    fp.close()
    if(res['ok']):
        Cps=res['data']
    else:
        print "Failed to compute Fourier coefficients!"
        return 0
    RF=RealField(prec)
    A=matrix(RF,dim)
    kappa=weight
    fourpi=RF(4.0)*pi.n(prec)
    one=RF(1.0)
    N4=RF(4*N)
    C=dict()
    if(silent>1):
        print "v=",v
        print "dim=",dim
    lastix=0
    # First set the upper right part of A
    for j in range(dim):
        ddim=dim-j
        if(silent>1):
            print "j=",j,"ddim=",ddim," lastix=",lastix
        for k in range(0,ddim):
            # need to scale with |D|^(k+0.5)
            if(silent>1):
                print "k=",k
                print "lastix+k=",lastix+k
            mm=RF(abs(v[lastix+k][0]))/N4
            tmp=RF(mm**(weight-one))
            if(silent>1):
                print "ddim+k=",ddim+k
            A[j,j+k]=Cps[lastix+k]*tmp
            C[v[lastix+k][0],v[lastix+k][1]]=Cps[lastix+k]
        lastix=lastix+k+1
    # And add the lower triangular part to make the matrix symmetric
    for j in range(dim):
        for k in range(0,j):
            A[j,k]=A[k,j]
    # And print the gram matrix
    res['matrix']=A
    dold=mpmath.mp.dps
    mpmath.mp.dps=int(prec/3.3)
    AInt=mpmath.matrix(int(A.nrows()),int(A.ncols()))
    AMp=mpmath.matrix(int(A.nrows()),int(A.ncols()))
    for ir in range(A.nrows()):
        for ik in range(A.ncols()):
            AInt[ir,ik]=mpmath.mpi(A[ir,ik]-tol,A[ir,ik]+tol)
            AMp[ir,ik]=mpmath.mpf(A[ir,ik])
    d=mpmath.det(AMp)
    if(silent>1):
        print "det(A-as-mpmath)=",d
    di=mpmath.det(AInt)
    if(silent>1):
        print "det(A-as-interval)=",di
    res['det']=(RF(di.a),RF(di.b))
    
    filename="PS_Gram"+stN+"-"+wt+".txt"
    if(silent>1):
        print "printing to file: "+filename
    print_matrix_to_file(A,filename,'A['+str(N)+']')
    if(silent>1):
        print "A-A.transpose()=",norm(A-A.transpose())
    B=A^-1
    #[d,B]=mat_inverse(A)
    if(silent>1):
        print "A=",A.n(100)
        print "det(A)=",di
        print "Done making inverse!"
    #res['det']=d
    res['inv']=B
    mpmath.mp.dps=dold
    filename="PS_Gram-inv"+stN+"-"+wt+".txt"        
    print_matrix_to_file(B,filename,' AI['+str(N)+']')
    # first make the filename
    s='%.1e'%tol
    filename3="PS_Coeffs"+stN+"-"+wt+"-"+s+".sobj"
    # If the file already exists we load it and append the new data
    if(silent>0):
        print "saving data to ",filename3
    try:
        f=open(filename3,"read")
    except IOError:
        if(silent>0):
            print "no file before!"
        # do nothing
    else:
        if(silent>0):
            print "file: "+filename3+" exists!"
        f.close()
        Cold=load(filename3)
        for key in Cold.keys():
            #                print"key:",key
            if(not C.has_key(key)): # then we add it
                print"key:",key," does not exist in the new version!"
                C[key]=Cold[key]
                save(C,filename3)
    ## Save the whole thing
    filename="PS_all_gram"+stN+"-"+wt+".sobj"
    save(res,filename) 
    ## our work is completed and we can remove the file
    try:
        os.remove(filename_work)
    except os.error:
        print "Could not remove file:",filename_work
        pass
    return res
Example No. 19
def Cramer(mat, prec):
    título("Cramer", "=")
    # Precision in digits
    mm.mp.dps = prec

    # Number of variables
    nV = len(mat)

    # Matrix A
    matA = []
    for l in range(nV):
        matA.append([])
        for c in range(nV):
            matA[l].append(mat[l][c])
    matA = mm.matrix(matA)
    print("Matriz A")
    print(str(matA))

    # Determinant of A
    detA = mm.det(matA)
    print("Determinante de A = " + str(detA))

    # Matrix B
    matB = []
    c = nV
    for l in range(nV):
        matB.append(mat[l][c])
    matB = mm.matrix(matB)
    print("Matriz B")
    print(str(matB))

    # Inverse matrix of A
    # matInvA = matA**-1
    # print("Matriz Inversa A")
    # print(str(matInvA))

    # Check the inverse of A
    # matVer = matA*matInvA
    # print("matriz A * Matriz Inversa A")
    # print(str(matVer))

    # Matrix X
    # matX = matInvA * matB
    # print("Matriz X")
    # print(str(matX))

    # Matrix X
    matX = []
    for n in range(nV):
        # Matrix An
        matAn = []
        for l in range(nV):
            matAn.append([])
            for c in range(nV):
                if c == n:
                    matAn[l].append(matB[l])
                else:
                    matAn[l].append(matA[l, c])
        matAn = mm.matrix(matAn)
        print(f"Matriz A{n}")
        print(str(matAn))
        detAn = mm.det(matAn)
        print(f"Determinante de A{n} = " + str(detAn))
        xn = mm.mpf(detAn / detA)
        matX.append(xn)
    matX = mm.matrix(matX)
    print("Matriz X")
    print(str(matX))

    # Residual error
    matE = mm.residual(matA, matX, matB)
    print("Matriz Erro Residual")
    print(str(matE))

    # Answer and residual
    resp = "Cramer:"
    for l in range(nV):
        resp += "\nx(" + str(l + 1) + ") = " + str(matX[l])
    ress = "Resíduo:"
    for l in range(nV):
        ress += "\nx(" + str(l + 1) + ") = " + str(matE[l])

    return resp, ress
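A minimal, standalone illustration of the same Cramer's-rule idea with mpmath (this is not the function above; the título helper and the augmented-matrix input format are left aside):

import mpmath as mm

A = mm.matrix([[2, 1], [1, 3]])
b = mm.matrix([5, 10])
detA = mm.det(A)

x = []
for n in range(2):
    An = mm.matrix(A)  # copy A, then replace column n with b
    for row in range(2):
        An[row, n] = b[row]
    x.append(mm.det(An) / detA)
print(x)  # [mpf('1.0'), mpf('3.0')]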
Example No. 20
def list_of_basis(N,
                  weight,
                  prec=501,
                  tol=1e-20,
                  sv_min=1E-1,
                  sv_max=1E15,
                  set_dim=None):
    r""" Returns a list of pairs (r,D) forming a basis
    """
    # First we find the smallest Discriminant for each of the components
    if (set_dim <> None and set_dim > 0):
        dim = set_dim
    else:
        dim = dimension_jac_cusp_forms(int(weight + 0.5), N, -1)
    basislist = dict()
    num_gotten = 0
    co_tmp = dict()
    num_gotten = 0
    C0 = 1
    RF = RealField(prec)
    if (silent > 1):
        print "N=", N
        print "dim=", dim
        print "sv_min=", sv_min
        print "sv_max=", sv_max
    Aold = Matrix(RF, 1)
    tol0 = 1E-20  #tol
    # we start with the first discriminant, then the second etc.
    Z2N = IntegerModRing(2 * N)
    ZZ4N = IntegerModRing(4 * N)
    for Dp in [1..max(1000, 100 * dim)]:
        D = -Dp  # we use the dual of the Weil representation
        D4N = ZZ4N(D)
        if (not (is_square(D4N))):
            continue
        for r in my_modsqrt(D4N, N):
            # I want to make sure that P_{(D,r)} is independent from the previously computed functions
            # The only sure way to do this is to compute all submatrices (to a much smaller precision than what we want at the end)
            # The candidate is [D,r] and we need to compute the vector of [D,r,D',r']
            # for all D',r' already in the list
            ltmp1 = dict()
            ltmp2 = dict()
            j = 0
            for [Dp, rp] in basislist.values():
                ltmp1[j] = [D, r, Dp, rp]
                ltmp2[j] = [Dp, rp, D, r]
                j = j + 1
            ltmp1[j] = [D, r, D, r]
            #print "Checking: D,r,D,r=",ltmp1
            ctmp1 = ps_coefficients_holomorphic_vec(N, weight, ltmp1, tol0)
            # print "ctmp1=",ctmp1
            if (j > 0):
                #print "Checking: D,r,Dp,rp=",ltmp2    # Data is ok?: {0: True}
                ctmp2 = ps_coefficients_holomorphic_vec(N, weight, ltmp2, tol0)
                # print "ctmp2=",ctmp2
            #print "num_gotten=",num_gotten
            A = matrix(RF, num_gotten + 1)
            # The old matrix with the elements that are already added to the basis
            # print "Aold=\n",A,"\n"
            # print "num_gotten=",num_gotten
            # print "Aold=\n",Aold,"\n"
            for k in range(Aold.nrows()):
                for l in range(Aold.ncols()):
                    A[k, l] = Aold[k, l]
                    # endfor
                    # print "A set by old=\n",A,"\n"
                    # Add the (D',r',D,r) for each D',r' in the list
            tmp = RF(1.0)
            for l in range(num_gotten):
                # we do not use  the scaling factor when
                # determining linear independence
                # mm=RF(abs(ltmp2[l][0]))/N4
                # tmp=RF(mm**(weight-one))
                A[num_gotten, l] = ctmp2['data'][l] * tmp
                # Add the (D,r,D',r') for each D',r' in the list
                # print "ctmp1.keys()=",ctmp1.keys()
            for l in range(num_gotten + 1):
                #mm=RF(abs(ltmp1[l][2]))/4N
                #tmp=RF(mm**(weight-one))
                # print "scaled with=",tmp.n(200)
                A[l, num_gotten] = ctmp1['data'][l] * tmp
            #[d,B]=mat_inverse(A) # d=det(A)
            #if(silent>1):
            #d=det(A)
            #print "det A = ",d
            # Now we have to determine whether we have a linearly independent set or not
            dold = mpmath.mp.dps
            mpmath.mp.dps = int(prec / 3.3)
            AInt = mpmath.matrix(int(A.nrows()), int(A.ncols()))
            AMp = mpmath.matrix(int(A.nrows()), int(A.ncols()))
            if (silent > 0):
                print "tol0=", tol0
            for ir in range(A.nrows()):
                for ik in range(A.ncols()):
                    AInt[ir, ik] = mpmath.mp.mpi(A[ir, ik] - tol0,
                                                 A[ir, ik] + tol0)
                    AMp[ir, ik] = mpmath.mpf(A[ir, ik])

            d = mpmath.det(AMp)
            di = mpmath.mp.mpi(mpmath.mp.det(AInt))
            #for ir in range(A.nrows()):
            #    for ik in range(A.ncols()):
            #        #print "A.d=",AInt[ir,ik].delta
            if (silent > 0):
                print "mpmath.mp.dps=", mpmath.mp.dps
                print "det(A)=", d
                print "det(A-as-interval)=", di
                print "d.delta=", di.delta
            #if(not mpmath.mpi(d) in di):
            #    raise ArithmeticError," Interval determinant not ok?"
            #ANP=A.numpy()
            #try:
            #    u,s,vnp=svd(ANP) # s are the singular values
            #    sl=s.tolist()
            #    mins=min(sl)  # the smallest singular value
            #    maxs=max(sl)
            #    if(silent>1):
            #        print "singular values = ",s
            #except LinAlgError:
            #    if(silent>0):
            #        print "could not compute SVD!"
            #        print "using abs(det) instead"
            #   mins=abs(d)
            #    maxs=abs(d)
            #if((mins>sv_min and maxs< sv_max)):
            zero = mpmath.mpi(0)
            if (zero not in di):
                if (silent > 1):
                    print "Adding D,r=", D, r
                basislist[num_gotten] = [D, r]
                num_gotten = num_gotten + 1
                if (num_gotten >= dim):
                    return basislist
                else:
                    #print "setting Aold to A"
                    Aold = A
            else:
                if (silent > 1):
                    print " do not use D,r=", D, r
            # endif
            mpmath.mp.dps = dold
    # endfor
    if (num_gotten < dim):
        raise ValueError, " did not find enough good elements for a basis list!"
Example No. 21
def gram_matrix(N,
                weight,
                prec=501,
                tol=1E-40,
                sv_min=1E-1,
                sv_max=1E15,
                bl=None,
                set_dim=None,
                force_prec=False):
    r""" Computes a matrix of p_{r,D}(r',D')
    for a basis of P_{r,D}, i.e. dim linearly independent P's
    INPUT: N      = Integer
           weight = Real
    OPTIONAL: 
           tol    = error bound for the Poincaré series
           sv_min = minimal allowed singular value when determining whether a given set is linearly independent or not.
           sv_max = maximally allowed singular value
           bl     = list of pairs (D_i,r_i) from which we compute a matrix of coefficients p_{D_i,r_i}(D_j,r_j)
        """
    # If we have supplied a list of D's and r's we make a gram matrix relative to these
    # otherwise we find a basis, i.e. linearly independent forms with correct dimension
    # find the dimension
    wt = '%.4f' % weight
    if (N < 10):
        stN = "0" + str(N)
    else:
        stN = str(N)
    v = dict()
    filename_work = "__N" + stN + "-" + wt + "--finding basis.txt"
    fp = open(filename_work, "write")
    fp.write("starting to find basis")
    fp.close()
    if (silent > 0):
        print "Forcing precision:", force_prec
    set_silence_level(0)
    if (bl <> None):
        dim = len(bl)
        l = bl
    else:
        if (set_dim <> None and set_dim > 0):
            dim = set_dim
        else:
            dim = dimension_jac_cusp_forms(int(weight + 0.5), N, -1)
        l = list_of_basis(N, weight, prec, tol, sv_min, sv_max, set_dim=dim)
    j = 0
    for [D, r] in l.values():
        for [Dp, rp] in l.values():
            # Recall that the gram matrix is symmetric. We need only compute the upper diagonal
            if (v.values().count([Dp, rp, D, r]) == 0):
                v[j] = [D, r, Dp, rp]
                j = j + 1
    # now v is a list we can get into computing coefficients
    # first we print the "gram data" (list of indices) to the file
    s = str(N) + ": (AI[" + str(N) + "],["
    indices = dict()
    for j in range(len(l)):
        Delta = l[j][0]
        r = l[j][1]
        diff = (r * r - Delta) % (4 * N)
        if (diff <> 0):
            raise ValueError, "ERROR r^2=%s not congruent to Delta=%s mod %s!" % (
                r * r, Delta, 4 * N)
        s = s + "(" + str(Delta) + "," + str(r) + ")"
        indices[j] = [Delta, r]
        if (j < len(l) - 1):
            s = s + ","
        else:
            s = s + "]),"
    s = s + "\n"
    if (silent > 0):
        print s + "\n"
    filename2 = "PS_Gramdata" + stN + "-" + wt + ".txt"
    fp = open(filename2, "write")
    fp.write(s)
    fp.close()
    try:
        os.remove(filename_work)
    except os.error:
        print "Could not remove file:", filename_work
        pass
    filename_work = "__N" + stN + "-" + wt + "--computing_gram_matrix.txt"
    fp = open(filename_work, "write")
    fp.write("")
    fp.close()
    #print "tol=",tol
    #set_silence_level(2)
    #print "force_prec(gram_mat)=",force_prec
    res = ps_coefficients_holomorphic_vec(N,
                                          weight,
                                          v,
                                          tol,
                                          prec,
                                          force_prec=force_prec)
    set_silence_level(0)

    res['indices'] = indices
    maxerr = 0.0
    for j in res['errs'].keys():
        tmperr = abs(res['errs'][j])
        #print "err(",j,")=",tmperr
        if (tmperr > maxerr):
            maxerr = tmperr
        # switch format for easier viewing
        res['errs'][j] = RR(tmperr)
    if (silent > 0):
        print "maxerr=", RR(maxerr)
    res['maxerr'] = maxerr
    wt_phalf = '%.4f' % (weight + 0.5)
    filename3 = "PS_Gramerr" + stN + "-" + wt + ".txt"
    fp = open(filename3, "write")
    wt
    s = "MAXERR[" + wt_phalf + "][" + stN + "]=" + str(RR(maxerr))
    fp.write(s)
    fp.close()
    if (res['ok']):
        Cps = res['data']
    else:
        print "Failed to compute Fourier coefficients!"
        return 0
    RF = RealField(prec)
    A = matrix(RF, dim)
    kappa = weight
    fourpi = RF(4.0) * pi.n(prec)
    one = RF(1.0)
    N4 = RF(4 * N)
    C = dict()
    if (silent > 1):
        print "v=", v
        print "dim=", dim
    lastix = 0
    # First set the upper right part of A
    for j in range(dim):
        ddim = dim - j
        if (silent > 1):
            print "j=", j, "ddim=", ddim, " lastix=", lastix
        for k in range(0, ddim):
            # need to scale with |D|^(k+0.5)
            if (silent > 1):
                print "k=", k
                print "lastix+k=", lastix + k
            mm = RF(abs(v[lastix + k][0])) / N4
            tmp = RF(mm**(weight - one))
            if (silent > 1):
                print "ddim+k=", ddim + k
            A[j, j + k] = Cps[lastix + k] * tmp
            C[v[lastix + k][0], v[lastix + k][1]] = Cps[lastix + k]
        lastix = lastix + k + 1
    # And add the lower triangular part to make the matrix symmetric
    for j in range(dim):
        for k in range(0, j):
            A[j, k] = A[k, j]
    # And print the gram matrix
    res['matrix'] = A
    dold = mpmath.mp.dps
    mpmath.mp.dps = int(prec / 3.3)
    AInt = mpmath.matrix(int(A.nrows()), int(A.ncols()))
    AMp = mpmath.matrix(int(A.nrows()), int(A.ncols()))
    for ir in range(A.nrows()):
        for ik in range(A.ncols()):
            AInt[ir, ik] = mpmath.mpi(A[ir, ik] - tol, A[ir, ik] + tol)
            AMp[ir, ik] = mpmath.mpf(A[ir, ik])
    d = mpmath.det(AMp)
    if (silent > 1):
        print "det(A-as-mpmath)=", d
    di = mpmath.det(AInt)
    if (silent > 1):
        print "det(A-as-interval)=", di
    res['det'] = (RF(di.a), RF(di.b))

    filename = "PS_Gram" + stN + "-" + wt + ".txt"
    if (silent > 1):
        print "printing to file: " + filename
    print_matrix_to_file(A, filename, 'A[' + str(N) + ']')
    if (silent > 1):
        print "A-A.transpose()=", norm(A - A.transpose())
    B = A ^ -1
    #[d,B]=mat_inverse(A)
    if (silent > 1):
        print "A=", A.n(100)
        print "det(A)=", di
        print "Done making inverse!"
    #res['det']=d
    res['inv'] = B
    mpmath.mp.dps = dold
    filename = "PS_Gram-inv" + stN + "-" + wt + ".txt"
    print_matrix_to_file(B, filename, ' AI[' + str(N) + ']')
    # first make the filename
    s = '%.1e' % tol
    filename3 = "PS_Coeffs" + stN + "-" + wt + "-" + s + ".sobj"
    # If the file already exists we load it and append the new data
    if (silent > 0):
        print "saving data to ", filename3
    try:
        f = open(filename3, "read")
    except IOError:
        if (silent > 0):
            print "no file before!"
        # do nothing
    else:
        if (silent > 0):
            print "file: " + filename3 + " exists!"
        f.close()
        Cold = load(filename3)
        for key in Cold.keys():
            #                print"key:",key
            if (not C.has_key(key)):  # then we add it
                print "key:", key, " does not exist in the new version!"
                C[key] = Cold[key]
                save(C, filename3)
    ## Save the whole thing
    filename = "PS_all_gram" + stN + "-" + wt + ".sobj"
    save(res, filename)
    ## our work is completed and we can remove the file
    try:
        os.remove(filename_work)
    except os.error:
        print "Could not remove file:", filename_work
        pass
    return res
Example No. 22
def A(phi,xi):
    matA = [[fgamma(phi+3)*fpolylog(phi+2,xi),fgamma(phi+2)*fpolylog(phi+1,xi)],
            [fgamma(phi+2)*fpolylog(phi+1,xi),fgamma(phi+1)*fpolylog(phi,xi)]]
    return det(matA)
Example No. 23
def lyapunov_exponent(mathcalA, varphi, periodic_points, alg='basic', norm=2):

    k = max([len(word) for word in periodic_points])

    if alg == 'basic':
        approx_basic = []

        for n in range(1, k + 1):
            integral = sum([
                mpmath.log(mpmath.norm(cocycle(word, mathcalA), p=norm)) *
                weight(word, varphi) for word in periodic_points
                if len(word) == n
            ])
            normalization = sum([
                weight(word, varphi) for word in periodic_points
                if len(word) == n
            ])
            approx_basic.append(integral / (n * normalization))

        return approx_basic

    elif alg == 'pollicott':
        #Compute the operator trace for each periodic point
        op_trace = {
            word: operator_trace(cocycle(word, mathcalA), 0)[0]
            for word in periodic_points
        }
        op_trace_der = {
            word: operator_trace(cocycle(word, mathcalA), 0)[1]
            for word in periodic_points
        }

        #Compute traces for products of transfer operator put in dictionary indexed by power
        trace = [
            sum([(op_trace[word] * weight(word, varphi))
                 for word in periodic_points if len(word) == n])
            for n in range(1, k + 1)
        ]
        trace_der = [
            sum([(op_trace_der[word] * weight(word, varphi))
                 for word in periodic_points if len(word) == n])
            for n in range(1, k + 1)
        ]

        coefficients = [mpmath.mpf(1)]

        coefficients_der = [mpmath.mpf(0)]

        for n in range(1, k + 1):
            M = mpmath.matrix(n)
            Der_M = mpmath.matrix(n)
            for i in range(0, n):
                for j in range(0, n):
                    if j > i + 1:
                        M[i, j] = 0
                        Der_M[i, j] = 0
                    elif j == i + 1:
                        M[i, j] = n - j
                        Der_M[i, j] = 0
                    else:
                        M[i, j] = trace[i - j]
                        Der_M[i, j] = trace_der[i - j]

            coefficients.append((((-1)**n) / mpmath.fac(n)) * mpmath.det(M))

            if n == 1:
                coefficients_der.append(
                    (((-1)**n) / mpmath.fac(n)) * trace_der[0])
            else:
                #Use Jacobi's formula to compute derivative of coefficients
                coefficients_der.append(
                    (((-1)**n) / mpmath.fac(n)) * trace_of(adj(M) * Der_M))

        approximation = []

        for n in range(1, k + 1):
            approximation.append(
                sum([coefficients_der[m] for m in range(1, n + 1)]) /
                sum([m * coefficients[m] for m in range(1, n + 1)]))

        return approximation

    else:
        return "Choices of algorithm are 'basic' and 'pollicott'"
Example No. 24
def QSdet(mat):
    if QSMODE == MODE_NORM:
        return np.linalg.det(mat)
    else:
        return mpmath.det(mat)
Example No. 26
def list_of_basis(N,weight,prec=501,tol=1e-20,sv_min=1E-1,sv_max=1E15,set_dim=None):
    r""" Returns a list of pairs (r,D) forming a basis
    """
    # First we find the smallest Discriminant for each of the components
    if(set_dim<>None and set_dim >0):
        dim=set_dim
    else:
        dim=dimension_jac_cusp_forms(int(weight+0.5),N,-1)
    basislist=dict()
    num_gotten=0
    co_tmp=dict()
    num_gotten=0
    C0=1
    RF=RealField(prec)
    if(silent>1):
        print "N=",N
        print "dim=",dim
        print "sv_min=",sv_min
        print "sv_max=",sv_max
    Aold=Matrix(RF,1)
    tol0=1E-20  #tol
    # we start with the first discriminant, then the second etc.
    Z2N=IntegerModRing(2*N)
    ZZ4N=IntegerModRing(4*N)
    for Dp in [1..max(1000,100*dim)]:
        D=-Dp # we use the dual of the Weil representation
        D4N=ZZ4N(D)
        if(not(is_square(D4N))):
            continue
        for r in my_modsqrt(D4N,N):
            # I want to make sure that P_{(D,r)} is independent from the previously computed functions
            # The only sure way to do this is to compute all submatrices (to a much smaller precision than what we want at the end)
            # The candidate is [D,r] and we need to compute the vector of [D,r,D',r']
            # for all D',r' already in the list
            ltmp1=dict()
            ltmp2=dict()
            j=0
            for [Dp,rp] in basislist.values():
                ltmp1[j]=[D,r,Dp,rp]
                ltmp2[j]=[Dp,rp,D,r]
                j=j+1
            ltmp1[j]=[D,r,D,r]
            #print "Checking: D,r,D,r=",ltmp1
            ctmp1=ps_coefficients_holomorphic_vec(N,weight,ltmp1,tol0)
            # print "ctmp1=",ctmp1
            if(j >0):
                #print "Checking: D,r,Dp,rp=",ltmp2    # Data is ok?: {0: True} 
                ctmp2=ps_coefficients_holomorphic_vec(N,weight,ltmp2,tol0)
                # print "ctmp2=",ctmp2
            #print "num_gotten=",num_gotten
            A=matrix(RF,num_gotten+1)
            # The old matrix with the elements that are already added to the basis
            # print "Aold=\n",A,"\n"
            # print "num_gotten=",num_gotten
            # print "Aold=\n",Aold,"\n"
            for k in range(Aold.nrows()):
                for l in range(Aold.ncols()):
                    A[k,l]=Aold[k,l]
                    # endfor
                    # print "A set by old=\n",A,"\n"
                    # Add the (D',r',D,r) for each D',r' in the list
            tmp=RF(1.0)
            for l in range(num_gotten):                
                # we do not use  the scaling factor when
                # determining linear independence
                # mm=RF(abs(ltmp2[l][0]))/N4
                # tmp=RF(mm**(weight-one))
                A[num_gotten,l]=ctmp2['data'][l]*tmp
                # Add the (D,r,D',r') for each D',r' in the list
                # print "ctmp1.keys()=",ctmp1.keys()
            for l in range(num_gotten+1):
                #mm=RF(abs(ltmp1[l][2]))/4N
                #tmp=RF(mm**(weight-one))
                # print "scaled with=",tmp.n(200)
                A[l,num_gotten]=ctmp1['data'][l]*tmp
            #[d,B]=mat_inverse(A) # d=det(A) 
            #if(silent>1):
            #d=det(A)
            #print "det A = ",d
            # Now we have to determine whether we have a linearly independent set or not
            dold=mpmath.mp.dps
            mpmath.mp.dps=int(prec/3.3)
            AInt=mpmath.matrix(int(A.nrows()),int(A.ncols()))
            AMp=mpmath.matrix(int(A.nrows()),int(A.ncols()))
            if(silent>0):
                print "tol0=",tol0
            for ir in range(A.nrows()):
                for ik in range(A.ncols()):
                    AInt[ir,ik]=mpmath.mp.mpi(A[ir,ik]-tol0,A[ir,ik]+tol0)
                    AMp[ir,ik]=mpmath.mpf(A[ir,ik])

            d=mpmath.det(AMp)
            di=mpmath.mp.mpi(mpmath.mp.det(AInt))
            #for ir in range(A.nrows()):
            #    for ik in range(A.ncols()):
            #        #print "A.d=",AInt[ir,ik].delta
            if(silent>0):
                print "mpmath.mp.dps=",mpmath.mp.dps
                print "det(A)=",d
                print "det(A-as-interval)=",di
                print "d.delta=",di.delta
            #if(not mpmath.mpi(d) in di):
            #    raise ArithmeticError," Interval determinant not ok?"
            #ANP=A.numpy()
            #try: 
            #    u,s,vnp=svd(ANP) # s are the singular values
            #    sl=s.tolist()
            #    mins=min(sl)  # the smallest singular value
            #    maxs=max(sl)
            #    if(silent>1):
            #        print "singular values = ",s
            #except LinAlgError:
            #    if(silent>0):
            #        print "could not compute SVD!"
            #        print "using abs(det) instead"
            #   mins=abs(d)
            #    maxs=abs(d)
            #if((mins>sv_min and maxs< sv_max)): 
            zero=mpmath.mpi(0)
            if(zero not in di):
                if(silent>1):
                    print "Adding D,r=",D,r
                basislist[num_gotten]=[D,r]
                num_gotten=num_gotten+1
                if(num_gotten>=dim):
                    return basislist
                else:
                    #print "setting Aold to A"
                    Aold=A
            else:
                if(silent>1):
                    print " do not use D,r=",D,r
            # endif
            mpmath.mp.dps=dold
    # endfor
    if(num_gotten < dim):
        raise ValueError," did not find enough good elements for a basis list!"