def _mul_(self, other): """ Multiply this Hecke operator by another element of the same algebra. If the other element is of the form `T_m` for some m, we check whether the product is equal to `T_{mn}` and return that; if the product is not (easily seen to be) of the form `T_{mn}`, then we calculate the product of the two matrices and return a Hecke algebra element defined by that. EXAMPLES: We create the space of modular symbols of level `11` and weight `2`, then compute `T_2` and `T_3` on it, along with their composition. :: sage: M = ModularSymbols(11) sage: t2 = M.hecke_operator(2); t3 = M.hecke_operator(3) sage: t2*t3 # indirect doctest Hecke operator T_6 on Modular Symbols space of dimension 3 for Gamma_0(11) of weight 2 with sign 0 over Rational Field sage: t3.matrix() * t2.matrix() [12 0 -2] [ 0 2 0] [ 0 0 2] sage: (t2*t3).matrix() [12 0 -2] [ 0 2 0] [ 0 0 2] When we compute `T_2^5` the result is not (easily seen to be) a Hecke operator of the form `T_n`, so it is returned as a Hecke module homomorphism defined as a matrix:: sage: t2**5 Hecke operator on Modular Symbols space of dimension 3 for Gamma_0(11) of weight 2 with sign 0 over Rational Field defined by: [243 0 -55] [ 0 -32 0] [ 0 0 -32] """ if isinstance(other, HeckeOperator) and other.parent() == self.parent(): n = None if arith.gcd(self.__n, other.__n) == 1: n = self.__n * other.__n else: P = set(arith.prime_divisors(self.domain().level())) if P.issubset(set(arith.prime_divisors(self.__n))) and \ P.issubset(set(arith.prime_divisors(other.__n))): n = self.__n * other.__n if n: return HeckeOperator(self.parent(), n) # otherwise return self.matrix_form() * other
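As a quick illustration of the multiplicative rule implemented above (a sketch only; it repeats the doctest computation outside of a docstring), the product of `T_2` and `T_3` on level `11` agrees with `T_6`::

    from sage.all import ModularSymbols

    M = ModularSymbols(11)
    t2, t3 = M.hecke_operator(2), M.hecke_operator(3)
    # gcd(2, 3) == 1, so the branch above returns the single operator T_6
    assert (t2 * t3).matrix() == M.hecke_operator(6).matrix()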
def CS_genus_symbol_list(self, force_recomputation=False): """ Returns the list of Conway-Sloane genus symbols in increasing order of primes dividing 2*det. EXAMPLES:: sage: Q = DiagonalQuadraticForm(ZZ, [1,2,3,4]) sage: Q.CS_genus_symbol_list() [Genus symbol at 2 : [[1, 2, 3, 1, 4], [2, 1, 1, 1, 1], [3, 1, 1, 1, 1]], Genus symbol at 3 : [[0, 3, 1], [1, 1, -1]]] """ ## Try to use the cached list if force_recomputation == False: try: return self.__CS_genus_symbol_list except AttributeError: pass ## Otherwise recompute and cache the list list_of_CS_genus_symbols = [ ] for p in prime_divisors(2 * self.det()): list_of_CS_genus_symbols.append(self.local_genus_symbol(p)) self.__CS_genus_symbol_list = list_of_CS_genus_symbols return list_of_CS_genus_symbols
def CS_genus_symbol_list(self, force_recomputation=False): """ Returns the list of Conway-Sloane genus symbols in increasing order of primes dividing 2*det. EXAMPLES:: sage: Q = DiagonalQuadraticForm(ZZ, [1,2,3,4]) sage: Q.CS_genus_symbol_list() [Genus symbol at 2: [2^-2 4^1 8^1]_6, Genus symbol at 3: 1^3 3^-1] """ ## Try to use the cached list if not force_recomputation: try: return self.__CS_genus_symbol_list except AttributeError: pass ## Otherwise recompute and cache the list list_of_CS_genus_symbols = [ ] for p in prime_divisors(2 * self.det()): list_of_CS_genus_symbols.append(self.local_genus_symbol(p)) self.__CS_genus_symbol_list = list_of_CS_genus_symbols return list_of_CS_genus_symbols
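A short usage sketch of the caching behaviour of the method above; nothing here goes beyond calls that already appear in the function and its doctest::

    from sage.all import DiagonalQuadraticForm, ZZ

    Q = DiagonalQuadraticForm(ZZ, [1, 2, 3, 4])
    syms = Q.CS_genus_symbol_list()          # computed and stored on Q
    syms_cached = Q.CS_genus_symbol_list()   # served from the cached attribute
    syms_fresh = Q.CS_genus_symbol_list(force_recomputation=True)  # recomputed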
def is_locally_equivalent_to(self, other, check_primes_only=False, force_jordan_equivalence_test=False): """ Determines if the current quadratic form (defined over ZZ) is locally equivalent to the given form over the real numbers and the `p`-adic integers for every prime p. This works by comparing the local Jordan decompositions at every prime, and the dimension and signature at the real place. INPUT: a QuadraticForm OUTPUT: boolean EXAMPLES:: sage: Q1 = QuadraticForm(ZZ, 3, [1, 0, -1, 2, -1, 5]) sage: Q2 = QuadraticForm(ZZ, 3, [2, 1, 2, 2, 1, 3]) sage: Q1.is_globally_equivalent_to(Q2) False sage: Q1.is_locally_equivalent_to(Q2) True """ ## TO IMPLEMENT: if self.det() == 0: raise NotImplementedError("OOps! We need to think about whether this still works for degenerate forms... especially check the signature.") ## Check that both forms have the same dimension and base ring if (self.dim() != other.dim()) or (self.base_ring() != other.base_ring()): return False ## Check that the determinant and level agree if (self.det() != other.det()) or (self.level() != other.level()): return False ## ----------------------------------------------------- ## Test equivalence over the real numbers if self.signature() != other.signature(): return False ## Test equivalence over Z_p for all primes if (self.base_ring() == ZZ) and (force_jordan_equivalence_test == False): ## Test equivalence with Conway-Sloane genus symbols (default over ZZ) if self.CS_genus_symbol_list() != other.CS_genus_symbol_list(): return False else: ## Test equivalence via the O'Meara criterion. for p in prime_divisors(ZZ(2) * self.det()): #print "checking the prime p = ", p if not self.has_equivalent_Jordan_decomposition_at_prime(other, p): return False ## All tests have passed! return True
def is_locally_equivalent_to(self, other, check_primes_only=False, force_jordan_equivalence_test=False): """ Determines if the current quadratic form (defined over ZZ) is locally equivalent to the given form over the real numbers and the `p`-adic integers for every prime p. This works by comparing the local Jordan decompositions at every prime, and the dimension and signature at the real place. INPUT: a QuadraticForm OUTPUT: boolean EXAMPLES:: sage: Q1 = QuadraticForm(ZZ, 3, [1, 0, -1, 2, -1, 5]) sage: Q2 = QuadraticForm(ZZ, 3, [2, 1, 2, 2, 1, 3]) sage: Q1.is_globally_equivalent_to(Q2) False sage: Q1.is_locally_equivalent_to(Q2) True """ ## TO IMPLEMENT: if self.det() == 0: raise NotImplementedError("OOps! We need to think about whether this still works for degenerate forms... especially check the signature.") ## Check that both forms have the same dimension and base ring if (self.dim() != other.dim()) or (self.base_ring() != other.base_ring()): return False ## Check that the determinant and level agree if (self.det() != other.det()) or (self.level() != other.level()): return False ## ----------------------------------------------------- ## Test equivalence over the real numbers if self.signature() != other.signature(): return False ## Test equivalence over Z_p for all primes if (self.base_ring() == ZZ) and (force_jordan_equivalence_test == False): ## Test equivalence with Conway-Sloane genus symbols (default over ZZ) if self.CS_genus_symbol_list() != other.CS_genus_symbol_list(): return False else: ## Test equivalence via the O'Meara criterion. for p in prime_divisors(ZZ(2) * self.det()): if not self.has_equivalent_Jordan_decomposition_at_prime(other, p): return False ## All tests have passed! return True
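The genus-symbol branch above can be spelled out by hand. The following sketch repeats only that part of the test for the two forms from the doctest (it deliberately omits the dimension, determinant, level and base-ring comparisons that the method performs first)::

    from sage.all import QuadraticForm, ZZ

    Q1 = QuadraticForm(ZZ, 3, [1, 0, -1, 2, -1, 5])
    Q2 = QuadraticForm(ZZ, 3, [2, 1, 2, 2, 1, 3])
    # same signature at the real place and same Conway-Sloane symbols at every
    # prime dividing 2*det: this mirrors the default ZZ branch above
    same_genus = (Q1.signature() == Q2.signature()
                  and Q1.CS_genus_symbol_list() == Q2.CS_genus_symbol_list())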
def anisotropic_primes(self):
    """
    Return a list of all the anisotropic primes of the quadratic form.

    INPUT:

    None

    OUTPUT:

    a list of positive prime numbers

    EXAMPLES::

        sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1])
        sage: Q.anisotropic_primes()
        [2]

    ::

        sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1,1])
        sage: Q.anisotropic_primes()
        [2]

    ::

        sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1,1,1])
        sage: Q.anisotropic_primes()
        []
    """
    ## Look at all prime divisors of 2 * Det(Q) to find the anisotropic primes...
    possible_primes = prime_divisors(2 * self.det())
    AnisoPrimes = []

    for p in possible_primes:
        if self.is_anisotropic(p):
            AnisoPrimes += [p]

    return AnisoPrimes
def degeneracy_matrix(self, p=None): if self.level().is_prime(): return matrix(QQ, self.dimension(), 0, sparse=True) if p is None: A = None for p in prime_divisors(self._level): if A is None: A = self.degeneracy_matrix(p) else: A = A.augment(self.degeneracy_matrix(p)) return A p = ideal(p) if p in self._degeneracy_matrices: return self._degeneracy_matrices[p] d = self._icosians_mod_p1.degeneracy_matrix(p) d.set_immutable() self._degeneracy_matrices[p] = d return d
def degeneracy_matrix(self, p=None):
    if self.level().is_prime():
        return matrix(QQ, self.dimension(), 0, sparse=True)
    if p is None:
        A = None
        for p in prime_divisors(self._level):
            if A is None:
                A = self.degeneracy_matrix(p)
            else:
                A = A.augment(self.degeneracy_matrix(p))
        return A
    p = ideal(p)
    if p in self._degeneracy_matrices:
        return self._degeneracy_matrices[p]
    d = self._icosians_mod_p1.degeneracy_matrix(p)
    d.set_immutable()
    self._degeneracy_matrices[p] = d
    return d
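The ``p is None`` branch above glues one block per prime divisor of the level via ``augment``. A generic sketch of that pattern, with a placeholder callable ``block_for_prime`` (hypothetical, not part of this module) standing in for the icosian-specific degeneracy data::

    from sage.all import prime_divisors

    def concatenated_blocks(level, block_for_prime):
        # glue the per-prime blocks side by side, one for each prime dividing the level
        A = None
        for p in prime_divisors(level):
            B = block_for_prime(p)
            A = B if A is None else A.augment(B)
        return A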
def anisotropic_primes(self): """ Return a list with all of the anisotropic primes of the quadratic form. The infinite place is denoted by `-1`. EXAMPLES:: sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1]) sage: Q.anisotropic_primes() [2, -1] sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1,1]) sage: Q.anisotropic_primes() [2, -1] sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1,1,1]) sage: Q.anisotropic_primes() [-1] """ # Look at all prime divisors of 2 * Det(Q) to find the # anisotropic primes... possible_primes = prime_divisors(2 * self.det()) + [-1] return [p for p in possible_primes if self.is_anisotropic(p)]
def mass__by_Siegel_densities(self, odd_algorithm="Pall", even_algorithm="Watson"): """ Gives the mass of transformations (det 1 and -1). WARNING: THIS IS BROKEN RIGHT NOW... =( Optional Arguments: - When p > 2 -- odd_algorithm = "Pall" (only one choice for now) - When p = 2 -- even_algorithm = "Kitaoka" or "Watson" REFERENCES: - Nipp's Book "Tables of Quaternary Quadratic Forms". - Papers of Pall (only for p>2) and Watson (for `p=2` -- tricky!). - Siegel, Milnor-Hussemoller, Conway-Sloane Paper IV, Kitoaka (all of which have problems...) EXAMPLES:: sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1,1]) sage: m = Q.mass__by_Siegel_densities(); m 1/384 sage: m - (2^Q.dim() * factorial(Q.dim()))^(-1) 0 :: sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1]) sage: m = Q.mass__by_Siegel_densities(); m 1/48 sage: m - (2^Q.dim() * factorial(Q.dim()))^(-1) 0 """ ## Setup n = self.dim() s = (n-1) // 2 if n % 2 != 0: char_d = squarefree_part(2*self.det()) ## Accounts for the det as a QF else: char_d = squarefree_part(self.det()) ## Form the generic zeta product generic_prod = ZZ(2) * (pi)**(-ZZ(n) * (n+1) / 4) ########################################## generic_prod *= (self.det())**(ZZ(n+1)/2) ## ***** This uses the Hessian Determinant ******** ########################################## #print "gp1 = ", generic_prod generic_prod *= prod([gamma__exact(ZZ(j)/2) for j in range(1,n+1)]) #print "\n---", [(ZZ(j)/2, gamma__exact(ZZ(j)/2)) for j in range(1,n+1)] #print "\n---", prod([gamma__exact(ZZ(j)/2) for j in range(1,n+1)]) #print "gp2 = ", generic_prod generic_prod *= prod([zeta__exact(ZZ(j)) for j in range(2, 2*s+1, 2)]) #print "\n---", [zeta__exact(ZZ(j)) for j in range(2, 2*s+1, 2)] #print "\n---", prod([zeta__exact(ZZ(j)) for j in range(2, 2*s+1, 2)]) #print "gp3 = ", generic_prod if (n % 2 == 0): generic_prod *= quadratic_L_function__exact(n//2, ZZ(-1)**(n//2) * char_d) #print " NEW = ", ZZ(1) * quadratic_L_function__exact(n/2, (-1)**(n/2) * char_d) #print #print "gp4 = ", generic_prod #print "generic_prod =", generic_prod ## Determine the adjustment factors adj_prod = ZZ.one() for p in prime_divisors(2 * self.det()): ## Cancel out the generic factors p_adjustment = prod([1 - ZZ(p)**(-j) for j in range(2, 2*s+1, 2)]) if (n % 2 == 0): p_adjustment *= (1 - kronecker((-1)**(n//2) * char_d, p) * ZZ(p)**(-n//2)) #print " EXTRA = ", ZZ(1) * (1 - kronecker((-1)**(n/2) * char_d, p) * ZZ(p)**(-n/2)) #print "Factor to cancel the generic one:", p_adjustment ## Insert the new mass factors if p == 2: if even_algorithm == "Kitaoka": p_adjustment = p_adjustment / self.Kitaoka_mass_at_2() elif even_algorithm == "Watson": p_adjustment = p_adjustment / self.Watson_mass_at_2() else: raise TypeError("There is a problem -- your even_algorithm argument is invalid. Try again. =(") else: if odd_algorithm == "Pall": p_adjustment = p_adjustment / self.Pall_mass_density_at_odd_prime(p) else: raise TypeError("There is a problem -- your optional arguments are invalid. Try again. =(") #print "p_adjustment for p =", p, "is", p_adjustment ## Put them together (cumulatively) adj_prod *= p_adjustment #print "Cumulative adj_prod =", adj_prod ## Extra adjustment for the case of a 2-dimensional form. #if (n == 2): # generic_prod *= 2 ## Return the mass mass = generic_prod * adj_prod return mass
def prove_BSD(E, verbosity=0, two_desc='mwrank', proof=None, secs_hi=5, return_BSD=False): r""" Attempts to prove the Birch and Swinnerton-Dyer conjectural formula for `E`, returning a list of primes `p` for which this function fails to prove BSD(E,p). Here, BSD(E,p) is the statement: "the Birch and Swinnerton-Dyer formula holds up to a rational number coprime to `p`." INPUT: - ``E`` - an elliptic curve - ``verbosity`` - int, how much information about the proof to print. - 0 - print nothing - 1 - print sketch of proof - 2 - print information about remaining primes - ``two_desc`` - string (default ``'mwrank'``), what to use for the two-descent. Options are ``'mwrank', 'simon', 'sage'`` - ``proof`` - bool or None (default: None, see proof.elliptic_curve or sage.structure.proof). If False, this function just immediately returns the empty list. - ``secs_hi`` - maximum number of seconds to try to compute the Heegner index before switching over to trying to compute the Heegner index bound. (Rank 0 only!) - ``return_BSD`` - bool (default: False) whether to return an object which contains information to reconstruct a proof NOTE: When printing verbose output, phrases such as "by Mazur" are referring to the following list of papers: REFERENCES: .. [Cha] \B. Cha. Vanishing of some cohomology goups and bounds for the Shafarevich-Tate groups of elliptic curves. J. Number Theory, 111:154- 178, 2005. .. [Jetchev] \D. Jetchev. Global divisibility of Heegner points and Tamagawa numbers. Compos. Math. 144 (2008), no. 4, 811--826. .. [Kato] \K. Kato. p-adic Hodge theory and values of zeta functions of modular forms. Astérisque, (295):ix, 117-290, 2004. .. [Kolyvagin] \V. A. Kolyvagin. On the structure of Shafarevich-Tate groups. Algebraic geometry, 94--121, Lecture Notes in Math., 1479, Springer, Berlin, 1991. .. [LawsonWuthrich] \T. Lawson and C. Wuthrich, Vanishing of some Galois cohomology groups for elliptic curves, :arxiv:`1505.02940` .. [LumStein] \A. Lum, W. Stein. Verification of the Birch and Swinnerton-Dyer Conjecture for Elliptic Curves with Complex Multiplication (unpublished) .. [Mazur] \B. Mazur. Modular curves and the Eisenstein ideal. Inst. Hautes Études Sci. Publ. Math. No. 47 (1977), 33--186 (1978). .. [Rubin] \K. Rubin. The "main conjectures" of Iwasawa theory for imaginary quadratic fields. Invent. Math. 103 (1991), no. 1, 25--68. .. [SteinWuthrich] \W. Stein and C. Wuthrich, Algorithms for the Arithmetic of Elliptic Curves using Iwasawa Theory Mathematics of Computation 82 (2013), 1757-1792. .. [SteinEtAl] \G. Grigorov, A. Jorza, S. Patrikis, W. Stein, C. Tarniţǎ. Computational verification of the Birch and Swinnerton-Dyer conjecture for individual elliptic curves. Math. Comp. 78 (2009), no. 268, 2397--2425. EXAMPLES:: sage: EllipticCurve('11a').prove_BSD(verbosity=2) p = 2: True by 2-descent True for p not in {2, 5} by Kolyvagin. Kolyvagin's bound for p = 5 applies by Lawson-Wuthrich True for p = 5 by Kolyvagin bound [] sage: EllipticCurve('14a').prove_BSD(verbosity=2) p = 2: True by 2-descent True for p not in {2, 3} by Kolyvagin. Kolyvagin's bound for p = 3 applies by Lawson-Wuthrich True for p = 3 by Kolyvagin bound [] sage: E = EllipticCurve("20a1") sage: E.prove_BSD(verbosity=2) p = 2: True by 2-descent True for p not in {2, 3} by Kolyvagin. Kato further implies that #Sha[3] is trivial. [] sage: E = EllipticCurve("50b1") sage: E.prove_BSD(verbosity=2) p = 2: True by 2-descent True for p not in {2, 3, 5} by Kolyvagin. 
Kolyvagin's bound for p = 3 applies by Lawson-Wuthrich True for p = 3 by Kolyvagin bound Remaining primes: p = 5: reducible, not surjective, additive, divides a Tamagawa number (no bounds found) ord_p(#Sha_an) = 0 [5] sage: E.prove_BSD(two_desc='simon') [5] A rank two curve:: sage: E = EllipticCurve('389a') We know nothing with proof=True:: sage: E.prove_BSD() Set of all prime numbers: 2, 3, 5, 7, ... We (think we) know everything with proof=False:: sage: E.prove_BSD(proof=False) [] A curve of rank 0 and prime conductor:: sage: E = EllipticCurve('19a') sage: E.prove_BSD(verbosity=2) p = 2: True by 2-descent True for p not in {2, 3} by Kolyvagin. Kolyvagin's bound for p = 3 applies by Lawson-Wuthrich True for p = 3 by Kolyvagin bound [] sage: E = EllipticCurve('37a') sage: E.rank() 1 sage: E._EllipticCurve_rational_field__rank (1, True) sage: E.analytic_rank = lambda : 0 sage: E.prove_BSD() Traceback (most recent call last): ... RuntimeError: It seems that the rank conjecture does not hold for this curve (Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field)! This may be a counterexample to BSD, but is more likely a bug. We test the consistency check for the 2-part of Sha:: sage: E = EllipticCurve('37a') sage: S = E.sha(); S Tate-Shafarevich group for the Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field sage: def foo(use_database): ....: return 4 sage: S.an = foo sage: E.prove_BSD() Traceback (most recent call last): ... RuntimeError: Apparent contradiction: 0 <= rank(sha[2]) <= 0, but ord_2(sha_an) = 2 An example with a Tamagawa number at 5:: sage: E = EllipticCurve('123a1') sage: E.prove_BSD(verbosity=2) p = 2: True by 2-descent True for p not in {2, 5} by Kolyvagin. Remaining primes: p = 5: reducible, not surjective, good ordinary, divides a Tamagawa number (no bounds found) ord_p(#Sha_an) = 0 [5] A curve for which 3 divides the order of the Tate-Shafarevich group:: sage: E = EllipticCurve('681b') sage: E.prove_BSD(verbosity=2) # long time p = 2: True by 2-descent... True for p not in {2, 3} by Kolyvagin.... Remaining primes: p = 3: irreducible, surjective, non-split multiplicative (0 <= ord_p <= 2) ord_p(#Sha_an) = 2 [3] A curve for which we need to use ``heegner_index_bound``:: sage: E = EllipticCurve('198b') sage: E.prove_BSD(verbosity=1, secs_hi=1) p = 2: True by 2-descent True for p not in {2, 3} by Kolyvagin. [3] The ``return_BSD`` option gives an object with detailed information about the proof:: sage: E = EllipticCurve('26b') sage: B = E.prove_BSD(return_BSD=True) sage: B.two_tor_rk 0 sage: B.N 26 sage: B.gens [] sage: B.primes [] sage: B.heegner_indexes {-23: 2} TESTS: This was fixed by :trac:`8184` and :trac:`7575`:: sage: EllipticCurve('438e1').prove_BSD(verbosity=1) p = 2: True by 2-descent... True for p not in {2} by Kolyvagin. [] :: sage: E = EllipticCurve('960d1') sage: E.prove_BSD(verbosity=1) # long time (4s on sage.math, 2011) p = 2: True by 2-descent True for p not in {2} by Kolyvagin. [] """ if proof is None: from sage.structure.proof.proof import get_flag proof = get_flag(proof, "elliptic_curve") else: proof = bool(proof) if not proof: return [] from copy import copy BSD = BSD_data() # We replace this curve by the optimal curve, which we can do since # truth of BSD(E,p) is invariant under isogeny. BSD.curve = E.optimal_curve() if BSD.curve.has_cm(): # ensure that CM is by a maximal order non_max_j_invs = [-12288000, 54000, 287496, 16581375] if BSD.curve.j_invariant() in non_max_j_invs: # is this possible for optimal curves? 
if verbosity > 0: print('CM by non maximal order: switching curves') for E in BSD.curve.isogeny_class(): if E.j_invariant() not in non_max_j_invs: BSD.curve = E break BSD.update() galrep = BSD.curve.galois_representation() if two_desc=='mwrank': M = mwrank_two_descent_work(BSD.curve, BSD.two_tor_rk) elif two_desc=='simon': M = simon_two_descent_work(BSD.curve, BSD.two_tor_rk) elif two_desc=='sage': M = native_two_isogeny_descent_work(BSD.curve, BSD.two_tor_rk) else: raise NotImplementedError() rank_lower_bd, rank_upper_bd, sha2_lower_bd, sha2_upper_bd, gens = M assert sha2_lower_bd <= sha2_upper_bd if gens is not None: gens = BSD.curve.saturation(gens)[0] if rank_lower_bd > rank_upper_bd: raise RuntimeError("Apparent contradiction: %d <= rank <= %d."%(rank_lower_bd, rank_upper_bd)) BSD.two_selmer_rank = rank_upper_bd + sha2_lower_bd + BSD.two_tor_rk if sha2_upper_bd == sha2_lower_bd: BSD.rank = rank_lower_bd BSD.bounds[2] = (sha2_lower_bd, sha2_upper_bd) else: BSD.rank = BSD.curve.rank(use_database=True) sha2_upper_bd -= (BSD.rank - rank_lower_bd) BSD.bounds[2] = (sha2_lower_bd, sha2_upper_bd) if verbosity > 0: print("Unable to compute the rank exactly -- used database.") if rank_lower_bd > 1: # We do not know BSD(E,p) for even a single p, since it's # an open problem to show that L^r(E,1)/(Reg*Omega) is # rational for any curve with r >= 2. from sage.sets.all import Primes BSD.primes = Primes() if return_BSD: BSD.rank = rank_lower_bd return BSD return BSD.primes if (BSD.sha_an.ord(2) == 0) != (BSD.bounds[2][1] == 0): raise RuntimeError("Apparent contradiction: %d <= rank(sha[2]) <= %d, but ord_2(sha_an) = %d"%(sha2_lower_bd, sha2_upper_bd, BSD.sha_an.ord(2))) if BSD.bounds[2][0] == BSD.sha_an.ord(2) and BSD.sha_an.ord(2) == BSD.bounds[2][1]: if verbosity > 0: print('p = 2: True by 2-descent') BSD.primes = [] BSD.bounds.pop(2) BSD.proof[2] = ['2-descent'] else: BSD.primes = [2] BSD.proof[2] = [('2-descent',)+BSD.bounds[2]] if len(gens) > rank_lower_bd or \ rank_lower_bd > rank_upper_bd: raise RuntimeError("Something went wrong with 2-descent.") if BSD.rank != len(gens): gens = BSD.curve.gens(proof=True) if BSD.rank != len(gens): raise RuntimeError("Could not get generators") BSD.gens = [BSD.curve.point(x, check=True) for x in gens] if BSD.rank != BSD.curve.analytic_rank(): raise RuntimeError("It seems that the rank conjecture does not hold for this curve (%s)! 
This may be a counterexample to BSD, but is more likely a bug."%(BSD.curve)) # reduce set of remaining primes to a finite set import signal kolyvagin_primes = [] heegner_index = None if BSD.rank == 0: for D in BSD.curve.heegner_discriminants_list(10): max_height = max(13,BSD.curve.quadratic_twist(D).CPS_height_bound()) heegner_primes = -1 while heegner_primes == -1: if max_height > 21: break heegner_primes, _, exact = BSD.curve.heegner_index_bound(D, max_height=max_height) max_height += 1 if isinstance(heegner_primes, list): break if not isinstance(heegner_primes, list): raise RuntimeError("Tried 10 Heegner discriminants, and heegner_index_bound failed each time.") if exact is not False: heegner_index = exact BSD.heegner_indexes[D] = exact else: BSD.heegner_index_upper_bound[D] = max(heegner_primes+[1]) if 2 in heegner_primes: heegner_primes.remove(2) else: # rank 1 for D in BSD.curve.heegner_discriminants_list(10): I = BSD.curve.heegner_index(D) J = I.is_int() if J[0] and J[1]>0: I = J[1] else: J = (2*I).is_int() if J[0] and J[1]>0: I = J[1] else: continue heegner_index = I BSD.heegner_indexes[D] = I break heegner_primes = [p for p in arith.prime_divisors(heegner_index) if p!=2] assert BSD.sha_an in ZZ and BSD.sha_an > 0 if BSD.curve.has_cm(): if BSD.curve.analytic_rank() == 0: if verbosity > 0: print(' p >= 5: true by Rubin') BSD.primes.append(3) else: K = rings.QuadraticField(BSD.curve.cm_discriminant(), 'a') D_K = K.disc() D_E = BSD.curve.discriminant() if len(K.factor(3)) == 1: # 3 does not split in K BSD.primes.append(3) for p in arith.prime_divisors(D_K): if p >= 5: BSD.primes.append(p) for p in arith.prime_divisors(D_E): if p >= 5 and D_K%p and len(K.factor(p)) == 1: # p is inert in K BSD.primes.append(p) for p in heegner_primes: if p >= 5 and D_E%p != 0 and D_K%p != 0 and len(K.factor(p)) == 1: # p is good for E and inert in K kolyvagin_primes.append(p) for p in arith.prime_divisors(BSD.sha_an): if p >= 5 and D_K%p != 0 and len(K.factor(p)) == 1: if BSD.curve.is_good(p): if verbosity > 2 and p in heegner_primes and heegner_index is None: print('ALERT: Prime p (%d) >= 5 dividing sha_an, good for E, inert in K, in heegner_primes, should not divide the actual Heegner index') # Note that the following check is not entirely # exhaustive, in case there is a p not dividing # the Heegner index in heegner_primes, # for which only an outer bound was computed if p not in heegner_primes: raise RuntimeError("p = %d divides sha_an, is of good reduction for E, inert in K, and does not divide the Heegner index. This may be a counterexample to BSD, but is more likely a bug. %s"%(p,BSD.curve)) if verbosity > 0: print('True for p not in {%s} by Kolyvagin (via Stein & Lum -- unpublished) and Rubin.' 
% str(list(set(BSD.primes).union(set(kolyvagin_primes))))[1:-1]) BSD.proof['finite'] = copy(BSD.primes) else: # no CM # do some tricks to get to a finite set without calling bound_kolyvagin BSD.primes += [p for p in galrep.non_surjective() if p != 2] for p in heegner_primes: if p not in BSD.primes: BSD.primes.append(p) for p in arith.prime_divisors(BSD.sha_an): if p not in BSD.primes and p != 2: BSD.primes.append(p) if verbosity > 0: s = str(BSD.primes)[1:-1] if 2 not in BSD.primes: if len(s) == 0: s = '2' else: s = '2, '+s print('True for p not in {' + s + '} by Kolyvagin.') BSD.proof['finite'] = copy(BSD.primes) primes_to_remove = [] for p in BSD.primes: if p == 2: continue if galrep.is_surjective(p) and not BSD.curve.has_additive_reduction(p): if BSD.curve.has_nonsplit_multiplicative_reduction(p): if BSD.rank > 0: continue if p==3: if (not (BSD.curve.is_ordinary(p) and BSD.curve.is_good(p))) and (not BSD.curve.has_split_multiplicative_reduction(p)): continue if BSD.rank > 0: continue if verbosity > 1: print(' p = %d: Trying p_primary_bound' % p) p_bound = BSD.Sha.p_primary_bound(p) if p in BSD.proof: BSD.proof[p].append(('Stein-Wuthrich', p_bound)) else: BSD.proof[p] = [('Stein-Wuthrich', p_bound)] if BSD.sha_an.ord(p) == 0 and p_bound == 0: if verbosity > 0: print('True for p=%d by Stein-Wuthrich.' % p) primes_to_remove.append(p) else: if p in BSD.bounds: BSD.bounds[p][1] = min(BSD.bounds[p][1], p_bound) else: BSD.bounds[p] = (0, p_bound) print('Analytic %d-rank is '%p + str(BSD.sha_an.ord(p)) + ', actual %d-rank is at most %d.' % (p, p_bound)) print(' by Stein-Wuthrich.\n') for p in primes_to_remove: BSD.primes.remove(p) kolyvagin_primes = [] for p in BSD.primes: if p == 2: continue if galrep.is_surjective(p): kolyvagin_primes.append(p) for p in kolyvagin_primes: BSD.primes.remove(p) # apply other hypotheses which imply Kolyvagin's bound holds bounded_primes = [] D_K = rings.QuadraticField(D, 'a').disc() # Cha's hypothesis for p in BSD.primes: if p == 2: continue if D_K%p != 0 and BSD.N%(p**2) != 0 and galrep.is_irreducible(p): if verbosity > 0: print('Kolyvagin\'s bound for p = %d applies by Cha.' % p) if p in BSD.proof: BSD.proof[p].append('Cha') else: BSD.proof[p] = ['Cha'] kolyvagin_primes.append(p) # Stein et al replaced for p in BSD.primes: # the lemma about the vanishing of H^1 is false in Stein et al for p=5 and 11 # here is the correction from Lawson-Wuthrich. Especially Theorem 14 in # [LawsonWuthrich] above. 
        if p in kolyvagin_primes or p == 2 or D_K % p == 0:
            continue
        crit_lw = False
        if p > 11 or p == 7:
            crit_lw = True
        elif p == 11:
            if BSD.N != 121 or BSD.curve.label() != "121c2":
                crit_lw = True
        elif galrep.is_irreducible(p):
            crit_lw = True
        else:
            phis = BSD.curve.isogenies_prime_degree(p)
            if len(phis) != 1:
                crit_lw = True
            else:
                C = phis[0].codomain()
                if p == 3:
                    if BSD.curve.torsion_order() % p != 0 and C.torsion_order() % p != 0:
                        crit_lw = True
                else:  # p == 5
                    Et = BSD.curve.quadratic_twist(5)
                    if Et.torsion_order() % p != 0 and C.torsion_order() % p != 0:
                        crit_lw = True
        if crit_lw:
            if verbosity > 0:
                print('Kolyvagin\'s bound for p = %d applies by Lawson-Wuthrich' % p)
            kolyvagin_primes.append(p)
            if p in BSD.proof:
                BSD.proof[p].append('Lawson-Wuthrich')
            else:
                BSD.proof[p] = ['Lawson-Wuthrich']
    for p in kolyvagin_primes:
        if p in BSD.primes:
            BSD.primes.remove(p)
    # apply Kolyvagin's bound
    primes_to_remove = []
    for p in kolyvagin_primes:
        if p == 2:
            continue
        if p not in heegner_primes:
            ord_p_bound = 0
        elif heegner_index is not None:  # p must divide heegner_index
            ord_p_bound = 2*heegner_index.ord(p)
            # Here Jetchev's results apply.
            m_max = max([BSD.curve.tamagawa_number(q).ord(p) for q in BSD.N.prime_divisors()])
            if m_max > 0:
                if verbosity > 0:
                    print('Jetchev\'s results apply (at p = %d) with m_max =' % p, m_max)
                if p in BSD.proof:
                    BSD.proof[p].append(('Jetchev',m_max))
                else:
                    BSD.proof[p] = [('Jetchev',m_max)]
            ord_p_bound -= 2*m_max
        else:  # Heegner index is None
            for D in BSD.heegner_index_upper_bound:
                M = BSD.heegner_index_upper_bound[D]
                ord_p_bound = 0
                while p**(ord_p_bound+1) <= M**2:
                    ord_p_bound += 1
                # now ord_p_bound is a bound on I_K
                ord_p_bound *= 2  # by Kolyvagin, now ord_p_bound is a bound on #Sha
                break
        if p in BSD.proof:
            BSD.proof[p].append(('Kolyvagin',ord_p_bound))
        else:
            BSD.proof[p] = [('Kolyvagin',ord_p_bound)]
        if BSD.sha_an.ord(p) == 0 and ord_p_bound == 0:
            if verbosity > 0:
                print('True for p = %d by Kolyvagin bound' % p)
            primes_to_remove.append(p)
        elif BSD.sha_an.ord(p) > ord_p_bound:
            raise RuntimeError("p = %d: ord_p_bound == %d, but sha_an.ord(p) == %d. This appears to be a counterexample to BSD, but is more likely a bug."%(p,ord_p_bound,BSD.sha_an.ord(p)))
        else: # BSD.sha_an.ord(p) <= ord_p_bound != 0:
            if p in BSD.bounds:
                low = BSD.bounds[p][0]
                BSD.bounds[p] = (low, min(BSD.bounds[p][1], ord_p_bound))
            else:
                BSD.bounds[p] = (0, ord_p_bound)
    for p in primes_to_remove:
        kolyvagin_primes.remove(p)
    BSD.primes = list( set(BSD.primes).union(set(kolyvagin_primes)) )
    # Kato's bound
    if BSD.rank == 0 and not BSD.curve.has_cm():
        L_over_Omega = BSD.curve.lseries().L_ratio()
        kato_primes = BSD.Sha.bound_kato()
        primes_to_remove = []
        for p in BSD.primes:
            if p == 2:
                continue
            if p not in kato_primes:
                if verbosity > 0:
                    print('Kato further implies that #Sha[%d] is trivial.'
% p) primes_to_remove.append(p) if p in BSD.proof: BSD.proof[p].append(('Kato',0)) else: BSD.proof[p] = [('Kato',0)] if p not in [2,3] and BSD.N%p != 0: if galrep.is_surjective(p): bd = L_over_Omega.valuation(p) if verbosity > 1: print('Kato implies that ord_p(#Sha[%d]) <= %d ' % (p, bd)) if p in BSD.proof: BSD.proof[p].append(('Kato',bd)) else: BSD.proof[p] = [('Kato',bd)] if p in BSD.bounds: low = BSD.bounds[p][0] BSD.bounds[p][1] = (low, min(BSD.bounds[p][1], bd)) else: BSD.bounds[p] = (0, bd) for p in primes_to_remove: BSD.primes.remove(p) # Mazur primes_to_remove = [] if BSD.N.is_prime(): for p in BSD.primes: if p == 2: continue if galrep.is_reducible(p): primes_to_remove.append(p) if verbosity > 0: print('True for p=%s by Mazur' % p) for p in primes_to_remove: BSD.primes.remove(p) if p in BSD.proof: BSD.proof[p].append('Mazur') else: BSD.proof[p] = ['Mazur'] BSD.primes.sort() # Try harder to compute the Heegner index, where it matters if heegner_index is None: if max_height < 18: max_height = 18 for D in BSD.heegner_index_upper_bound: M = BSD.heegner_index_upper_bound[D] for p in kolyvagin_primes: if p not in BSD.primes or p == 3: continue if verbosity > 0: print(' p = %d: Trying harder for Heegner index' % p) obt = 0 while p**(BSD.sha_an.ord(p)/2+1) <= M and max_height < 22: if verbosity > 2: print(' trying max_height =', max_height) old_bound = M M, _, exact = BSD.curve.heegner_index_bound(D, max_height=max_height, secs_dc=secs_hi) if M == -1: max_height += 1 continue if exact is not False: heegner_index = exact BSD.heegner_indexes[D] = exact M = exact if verbosity > 2: print(' heegner index =', M) else: M = max(M+[1]) if verbosity > 2: print(' bound =', M) if old_bound == M: obt += 1 if obt == 2: break max_height += 1 BSD.heegner_index_upper_bound[D] = min(M,BSD.heegner_index_upper_bound[D]) low, upp = BSD.bounds[p] expn = 0 while p**(expn+1) <= M: expn += 1 if 2*expn < upp: upp = 2*expn BSD.bounds[p] = (low,upp) if verbosity > 0: print(' got better bound on ord_p =', upp) if low == upp: if upp != BSD.sha_an.ord(p): raise RuntimeError else: if verbosity > 0: print(' proven!') BSD.primes.remove(p) break for p in kolyvagin_primes: if p not in BSD.primes or p == 3: continue for D in BSD.curve.heegner_discriminants_list(4): if D in BSD.heegner_index_upper_bound: continue print(' discriminant', D) if verbosity > 0: print('p = %d: Trying discriminant = %d for Heegner index' % (p, D)) max_height = max(10, BSD.curve.quadratic_twist(D).CPS_height_bound()) obt = 0 while True: if verbosity > 2: print(' trying max_height =', max_height) old_bound = M if p**(BSD.sha_an.ord(p)/2+1) > M or max_height >= 22: break M, _, exact = BSD.curve.heegner_index_bound(D, max_height=max_height, secs_dc=secs_hi) if M == -1: max_height += 1 continue if exact is not False: heegner_index = exact BSD.heegner_indexes[D] = exact M = exact if verbosity > 2: print(' heegner index =', M) else: M = max(M+[1]) if verbosity > 2: print(' bound =', M) if old_bound == M: obt += 1 if obt == 2: break max_height += 1 BSD.heegner_index_upper_bound[D] = M low, upp = BSD.bounds[p] expn = 0 while p**(expn+1) <= M: expn += 1 if 2*expn < upp: upp = 2*expn BSD.bounds[p] = (low,upp) if verbosity > 0: print(' got better bound =', upp) if low == upp: if upp != BSD.sha_an.ord(p): raise RuntimeError else: if verbosity > 0: print(' proven!') BSD.primes.remove(p) break # print some extra information if verbosity > 1: if len(BSD.primes) > 0: print('Remaining primes:') for p in BSD.primes: s = 'p = ' + str(p) + ': ' if 
galrep.is_irreducible(p): s += 'ir' s += 'reducible, ' if not galrep.is_surjective(p): s += 'not ' s += 'surjective, ' a_p = BSD.curve.an(p) if BSD.curve.is_good(p): if a_p%p != 0: s += 'good ordinary' else: s += 'good, non-ordinary' else: assert BSD.curve.is_minimal() if a_p == 0: s += 'additive' elif a_p == 1: s += 'split multiplicative' elif a_p == -1: s += 'non-split multiplicative' if BSD.curve.tamagawa_product()%p==0: s += ', divides a Tamagawa number' if p in BSD.bounds: s += '\n (%d <= ord_p <= %d)'%BSD.bounds[p] else: s += '\n (no bounds found)' s += '\n ord_p(#Sha_an) = %d'%BSD.sha_an.ord(p) if heegner_index is None: may_divide = True for D in BSD.heegner_index_upper_bound: if p > BSD.heegner_index_upper_bound[D] or p not in kolyvagin_primes: may_divide = False if may_divide: s += '\n may divide the Heegner index, for which only a bound was computed' print(s) if BSD.curve.has_cm(): if BSD.rank == 1: BSD.proof['reason_finite'] = 'Rubin&Kolyvagin' else: BSD.proof['reason_finite'] = 'Rubin' else: BSD.proof['reason_finite'] = 'Kolyvagin' # reduce memory footprint of BSD object: BSD.curve = BSD.curve.label() BSD.Sha = None return BSD if return_BSD else BSD.primes
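A minimal usage sketch of the routine above; the calls are the ones from the EXAMPLES block, and no output is asserted here::

    from sage.all import EllipticCurve

    E = EllipticCurve('11a1')
    # list of primes p for which BSD(E, p) could not be established
    remaining = E.prove_BSD(verbosity=0)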
def _find_scaling_L_ratio(self): r""" This function is use to set ``_scaling``, the factor used to adjust the scalar multiple of the modular symbol. If `[0]`, the modular symbol evaluated at 0, is non-zero, we can just scale it with respect to the approximation of the L-value. It is known that the quotient is a rational number with small denominator. Otherwise we try to scale using quadratic twists. ``_scaling`` will be set to a rational non-zero multiple if we succeed and to 1 otherwise. Even if we fail we scale at least to make up the difference between the periods of the `X_0`-optimal curve and our given curve `E` in the isogeny class. EXAMPLES:: sage : m = EllipticCurve('11a1').modular_symbol(use_eclib=True) sage : m._scaling 1 sage: m = EllipticCurve('11a2').modular_symbol(use_eclib=True) sage: m._scaling 5/2 sage: m = EllipticCurve('11a3').modular_symbol(use_eclib=True) sage: m._scaling 1/10 sage: m = EllipticCurve('11a1').modular_symbol(use_eclib=False) sage: m._scaling 1/5 sage: m = EllipticCurve('11a2').modular_symbol(use_eclib=False) sage: m._scaling 1 sage: m = EllipticCurve('11a3').modular_symbol(use_eclib=False) sage: m._scaling 1/25 sage: m = EllipticCurve('37a1').modular_symbol(use_eclib=False) sage: m._scaling 1 sage: m = EllipticCurve('37a1').modular_symbol(use_eclib=True) sage: m._scaling -1 sage: m = EllipticCurve('389a1').modular_symbol(use_eclib=True) sage: m._scaling -1/2 sage: m = EllipticCurve('389a1').modular_symbol(use_eclib=False) sage: m._scaling 2 sage: m = EllipticCurve('196a1').modular_symbol(use_eclib=False) sage: m._scaling 1/2 Some harder cases fail:: sage: m = EllipticCurve('121b1').modular_symbol(use_eclib=False) Warning : Could not normalize the modular symbols, maybe all further results will be multiplied by -1, 2 or -2. sage: m._scaling 1 TESTS:: sage: rk0 = ['11a1', '11a2', '15a1', '27a1', '37b1'] sage: for la in rk0: # long time (3s on sage.math, 2011) ....: E = EllipticCurve(la) ....: me = E.modular_symbol(use_eclib = True) ....: ms = E.modular_symbol(use_eclib = False) ....: print E.lseries().L_ratio()*E.real_components(), me(0), ms(0) 1/5 1/5 1/5 1 1 1 1/4 1/4 1/4 1/3 1/3 1/3 2/3 2/3 2/3 sage: rk1 = ['37a1','43a1','53a1', '91b1','91b2','91b3'] sage: [EllipticCurve(la).modular_symbol(use_eclib=True)(0) for la in rk1] # long time (1s on sage.math, 2011) [0, 0, 0, 0, 0, 0] sage: for la in rk1: # long time (8s on sage.math, 2011) ....: E = EllipticCurve(la) ....: m = E.modular_symbol(use_eclib = True) ....: lp = E.padic_lseries(5) ....: for D in [5,17,12,8]: ....: ED = E.quadratic_twist(D) ....: md = sum([kronecker(D,u)*m(ZZ(u)/D) for u in range(D)]) ....: etaD = lp._quotient_of_periods_to_twist(D) ....: assert ED.lseries().L_ratio()*ED.real_components() * etaD == md """ E = self._E self._scaling = 1 # by now. 
self._failed_to_scale = False if self._sign == 1: at0 = self(0) # print 'modular symbol evaluates to ',at0,' at 0' if at0 != 0: l1 = self.__lalg__(1) if at0 != l1: verbose('scale modular symbols by %s' % (l1 / at0)) self._scaling = l1 / at0 else: # if [0] = 0, we can still hope to scale it correctly by considering twists of E Dlist = [ 5, 8, 12, 13, 17, 21, 24, 28, 29, 33, 37, 40, 41, 44, 53, 56, 57, 60, 61, 65, 69, 73, 76, 77, 85, 88, 89, 92, 93, 97 ] # a list of positive fundamental discriminants j = 0 at0 = 0 # computes [0]+ for the twist of E by D until one value is non-zero while j < 30 and at0 == 0: D = Dlist[j] # the following line checks if the twist of the newform of E by D is a newform # this is to avoid that we 'twist back' if all( valuation(E.conductor(), ell) <= valuation(D, ell) for ell in prime_divisors(D)): at0 = sum([ kronecker_symbol(D, u) * self(ZZ(u) / D) for u in range(1, abs(D)) ]) j += 1 if j == 30 and at0 == 0: # curves like "121b1", "225a1", "225e1", "256a1", "256b1", "289a1", "361a1", "400a1", "400c1", "400h1", "441b1", "441c1", "441d1", "441f1 .. will arrive here self._failed_to_scale = True self.__scale_by_periods_only__() else: l1 = self.__lalg__(D) if at0 != l1: verbose('scale modular symbols by %s found at D=%s ' % (l1 / at0, D), level=2) self._scaling = l1 / at0 else: # that is when sign = -1 Dlist = [ -3, -4, -7, -8, -11, -15, -19, -20, -23, -24, -31, -35, -39, -40, -43, -47, -51, -52, -55, -56, -59, -67, -68, -71, -79, -83, -84, -87, -88, -91 ] # a list of negative fundamental discriminants j = 0 at0 = 0 while j < 30 and at0 == 0: # computes [0]+ for the twist of E by D until one value is non-zero D = Dlist[j] if all( valuation(E.conductor(), ell) <= valuation(D, ell) for ell in prime_divisors(D)): at0 = -sum([ kronecker_symbol(D, u) * self(ZZ(u) / D) for u in range(1, abs(D)) ]) j += 1 if j == 30 and at0 == 0: # no more hope for a normalization # we do at least a scaling with the quotient of the periods self._failed_to_scale = True self.__scale_by_periods_only__() else: l1 = self.__lalg__(D) if at0 != l1: verbose('scale modular symbols by %s' % (l1 / at0)) self._scaling = l1 / at0
def mass__by_Siegel_densities(self, odd_algorithm="Pall", even_algorithm="Watson"): """ Gives the mass of transformations (det 1 and -1). WARNING: THIS IS BROKEN RIGHT NOW... =( Optional Arguments: - When p > 2 -- odd_algorithm = "Pall" (only one choice for now) - When p = 2 -- even_algorithm = "Kitaoka" or "Watson" REFERENCES: - Nipp's Book "Tables of Quaternary Quadratic Forms". - Papers of Pall (only for p>2) and Watson (for `p=2` -- tricky!). - Siegel, Milnor-Hussemoller, Conway-Sloane Paper IV, Kitoaka (all of which have problems...) EXAMPLES:: sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1,1]) sage: m = Q.mass__by_Siegel_densities(); m 1/384 sage: m - (2^Q.dim() * factorial(Q.dim()))^(-1) 0 :: sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1]) sage: m = Q.mass__by_Siegel_densities(); m 1/48 sage: m - (2^Q.dim() * factorial(Q.dim()))^(-1) 0 """ ## Setup n = self.dim() s = (n - 1) // 2 if n % 2 != 0: char_d = squarefree_part(2 * self.det()) ## Accounts for the det as a QF else: char_d = squarefree_part(self.det()) ## Form the generic zeta product generic_prod = ZZ(2) * (pi)**(-ZZ(n) * (n + 1) / 4) ########################################## generic_prod *= (self.det())**( ZZ(n + 1) / 2) ## ***** This uses the Hessian Determinant ******** ########################################## #print "gp1 = ", generic_prod generic_prod *= prod([gamma__exact(ZZ(j) / 2) for j in range(1, n + 1)]) #print "\n---", [(ZZ(j)/2, gamma__exact(ZZ(j)/2)) for j in range(1,n+1)] #print "\n---", prod([gamma__exact(ZZ(j)/2) for j in range(1,n+1)]) #print "gp2 = ", generic_prod generic_prod *= prod([zeta__exact(ZZ(j)) for j in range(2, 2 * s + 1, 2)]) #print "\n---", [zeta__exact(ZZ(j)) for j in range(2, 2*s+1, 2)] #print "\n---", prod([zeta__exact(ZZ(j)) for j in range(2, 2*s+1, 2)]) #print "gp3 = ", generic_prod if (n % 2 == 0): generic_prod *= quadratic_L_function__exact(n // 2, ZZ(-1)**(n // 2) * char_d) #print " NEW = ", ZZ(1) * quadratic_L_function__exact(n/2, (-1)**(n/2) * char_d) #print #print "gp4 = ", generic_prod #print "generic_prod =", generic_prod ## Determine the adjustment factors adj_prod = ZZ.one() for p in prime_divisors(2 * self.det()): ## Cancel out the generic factors p_adjustment = prod([1 - ZZ(p)**(-j) for j in range(2, 2 * s + 1, 2)]) if (n % 2 == 0): p_adjustment *= (1 - kronecker( (-1)**(n // 2) * char_d, p) * ZZ(p)**(-n // 2)) #print " EXTRA = ", ZZ(1) * (1 - kronecker((-1)**(n/2) * char_d, p) * ZZ(p)**(-n/2)) #print "Factor to cancel the generic one:", p_adjustment ## Insert the new mass factors if p == 2: if even_algorithm == "Kitaoka": p_adjustment = p_adjustment / self.Kitaoka_mass_at_2() elif even_algorithm == "Watson": p_adjustment = p_adjustment / self.Watson_mass_at_2() else: raise TypeError( "There is a problem -- your even_algorithm argument is invalid. Try again. =(" ) else: if odd_algorithm == "Pall": p_adjustment = p_adjustment / self.Pall_mass_density_at_odd_prime( p) else: raise TypeError( "There is a problem -- your optional arguments are invalid. Try again. =(" ) #print "p_adjustment for p =", p, "is", p_adjustment ## Put them together (cumulatively) adj_prod *= p_adjustment #print "Cumulative adj_prod =", adj_prod ## Extra adjustment for the case of a 2-dimensional form. #if (n == 2): # generic_prod *= 2 ## Return the mass mass = generic_prod * adj_prod return mass
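The Minkowski-Siegel sanity check from the docstring, written out as a standalone snippet (the docstring confirms it for the sums of 3 and 4 squares)::

    from sage.all import DiagonalQuadraticForm, ZZ, factorial

    Q = DiagonalQuadraticForm(ZZ, [1, 1, 1, 1])
    # for the sum of n squares (n = 3, 4) the mass equals 1 / (2^n * n!)
    assert Q.mass__by_Siegel_densities() == 1 / (ZZ(2)**Q.dim() * factorial(Q.dim()))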
def basis(self, reduce=True): r""" Produce a basis for the free abelian group of eta-products of level N (under multiplication), attempting to find basis vectors of the smallest possible degree. INPUT: - ``reduce`` - a boolean (default True) indicating whether or not to apply LLL-reduction to the calculated basis EXAMPLE:: sage: EtaGroup(5).basis() [Eta product of level 5 : (eta_1)^6 (eta_5)^-6] sage: EtaGroup(12).basis() [Eta product of level 12 : (eta_1)^2 (eta_2)^1 (eta_3)^2 (eta_4)^-1 (eta_6)^-7 (eta_12)^3, Eta product of level 12 : (eta_1)^-4 (eta_2)^2 (eta_3)^4 (eta_6)^-2, Eta product of level 12 : (eta_1)^-1 (eta_2)^3 (eta_3)^3 (eta_4)^-2 (eta_6)^-9 (eta_12)^6, Eta product of level 12 : (eta_1)^1 (eta_2)^-1 (eta_3)^-3 (eta_4)^-2 (eta_6)^7 (eta_12)^-2, Eta product of level 12 : (eta_1)^-6 (eta_2)^9 (eta_3)^2 (eta_4)^-3 (eta_6)^-3 (eta_12)^1] sage: EtaGroup(12).basis(reduce=False) # much bigger coefficients [Eta product of level 12 : (eta_2)^24 (eta_12)^-24, Eta product of level 12 : (eta_1)^-336 (eta_2)^576 (eta_3)^696 (eta_4)^-216 (eta_6)^-576 (eta_12)^-144, Eta product of level 12 : (eta_1)^-8 (eta_2)^-2 (eta_6)^2 (eta_12)^8, Eta product of level 12 : (eta_1)^1 (eta_2)^9 (eta_3)^13 (eta_4)^-4 (eta_6)^-15 (eta_12)^-4, Eta product of level 12 : (eta_1)^15 (eta_2)^-24 (eta_3)^-29 (eta_4)^9 (eta_6)^24 (eta_12)^5] ALGORITHM: An eta product of level `N` is uniquely determined by the integers `r_d` for `d | N` with `d < N`, since `\sum_{d | N} r_d = 0`. The valid `r_d` are those that satisfy two congruences modulo 24, and one congruence modulo 2 for every prime divisor of N. We beef up the congruences modulo 2 to congruences modulo 24 by multiplying by 12. To calculate the kernel of the ensuing map `\ZZ^m \to (\ZZ/24\ZZ)^n` we lift it arbitrarily to an integer matrix and calculate its Smith normal form. This gives a basis for the lattice. This lattice typically contains "large" elements, so by default we pass it to the reduce_basis() function which performs LLL-reduction to give a more manageable basis. """ N = self.level() divs = divisors(N)[:-1] s = len(divs) primedivs = prime_divisors(N) rows = [] for i in xrange(s): # generate a row of relation matrix row = [ Mod(divs[i], 24) - Mod(N, 24), Mod(N/divs[i], 24) - Mod(1, 24)] for p in primedivs: row.append( Mod(12*(N/divs[i]).valuation(p), 24)) rows.append(row) M = matrix(IntegerModRing(24), rows) Mlift = M.change_ring(ZZ) # now we compute elementary factors of Mlift S,U,V = Mlift.smith_form() good_vects = [] for i in xrange(U.nrows()): vect = U.row(i) nf = (i < S.ncols() and S[i,i]) or 0 good_vects.append((vect * 24/gcd(nf, 24)).list()) for v in good_vects: v.append(-sum([r for r in v])) dicts = [] for v in good_vects: dicts.append({}) for i in xrange(s): dicts[-1][divs[i]] = v[i] dicts[-1][N] = v[-1] if reduce: return self.reduce_basis([ self(d) for d in dicts]) else: return [self(d) for d in dicts]
def series(self, n=2, quadratic_twist=+1, prec=5): r""" Returns the `n`-th approximation to the `p`-adic L-series as a power series in `T` (corresponding to `\gamma-1` with `\gamma=1+p` as a generator of `1+p\ZZ_p`). Each coefficient is a `p`-adic number whose precision is provably correct. Here the normalization of the `p`-adic L-series is chosen such that `L_p(J,1) = (1-1/\alpha)^2 L(J,1)/\Omega_J` where `\alpha` is the unit root INPUT: - ``n`` - (default: 2) a positive integer - ``quadratic_twist`` - (default: +1) a fundamental discriminant of a quadratic field, coprime to the conductor of the curve - ``prec`` - (default: 5) maximal number of terms of the series to compute; to compute as many as possible just give a very large number for ``prec``; the result will still be correct. ALIAS: power_series is identical to series. EXAMPLES:: sage: J = J0(188)[0] sage: p = 7 sage: L = J.padic_lseries(p) sage: L.is_ordinary() True sage: f = L.series(2) sage: f[0] O(7^20) sage: f[1].norm() 3 + 4*7 + 3*7^2 + 6*7^3 + 5*7^4 + 5*7^5 + 6*7^6 + 4*7^7 + 5*7^8 + 7^10 + 5*7^11 + 4*7^13 + 4*7^14 + 5*7^15 + 2*7^16 + 5*7^17 + 7^18 + 7^19 + O(7^20) """ n = ZZ(n) if n < 1: raise ValueError("n (={0}) must be a positive integer".format(n)) if not self.is_ordinary(): raise ValueError("p (={0}) must be an ordinary prime".format( self._p)) # check if the conditions on quadratic_twist are satisfied D = ZZ(quadratic_twist) if D != 1: if D % 4 == 0: d = D // 4 if not d.is_squarefree() or d % 4 == 1: raise ValueError( "quadratic_twist (={0}) must be a fundamental discriminant of a quadratic field" .format(D)) else: if not D.is_squarefree() or D % 4 != 1: raise ValueError( "quadratic_twist (={0}) must be a fundamental discriminant of a quadratic field" .format(D)) if gcd(D, self._p) != 1: raise ValueError( "quadratic twist (={0}) must be coprime to p (={1}) ". format(D, self._p)) if gcd(D, self._E.conductor()) != 1: for ell in prime_divisors(D): if valuation(self._E.conductor(), ell) > valuation(D, ell): raise ValueError( "can not twist a curve of conductor (={0}) by the quadratic twist (={1})." 
.format(self._E.conductor(), D)) p = self._p if p == 2 and self._normalize: print('Warning : For p=2 the normalization might not be correct !') #verbose("computing L-series for p=%s, n=%s, and prec=%s"%(p,n,prec)) # bounds = self._prec_bounds(n,prec) # padic_prec = max(bounds[1:]) + 5 padic_prec = 10 # verbose("using p-adic precision of %s"%padic_prec) res_series_prec = min(p**(n - 1), prec) verbose("using series precision of %s" % res_series_prec) ans = self._get_series_from_cache(n, res_series_prec, D) if not ans is None: verbose("found series in cache") return ans K = QQ gamma = K(1 + p) R = PowerSeriesRing(K, 'T', res_series_prec) T = R(R.gen(), res_series_prec) #L = R(0) one_plus_T_factor = R(1) gamma_power = K(1) teich = self.teichmuller(padic_prec) p_power = p**(n - 1) # F = Qp(p,padic_prec) verbose("Now iterating over %s summands" % ((p - 1) * p_power)) verbose_level = get_verbose() count_verb = 0 alphas = self.alpha() #print len(alphas) Lprod = [] self._emb = 0 if len(alphas) == 2: split = True else: split = False for alpha in alphas: L = R(0) self._emb = self._emb + 1 for j in range(p_power): s = K(0) if verbose_level >= 2 and j / p_power * 100 > count_verb + 3: verbose("%.2f percent done" % (float(j) / p_power * 100)) count_verb += 3 for a in range(1, p): if split: b = (teich[a]) % ZZ(p**n) b = b * gamma_power else: b = teich[a] * gamma_power s += self.measure(b, n, padic_prec, D, alpha) L += s * one_plus_T_factor one_plus_T_factor *= 1 + T gamma_power *= gamma Lprod = Lprod + [L] if len(Lprod) == 1: return Lprod[0] else: return Lprod[0] * Lprod[1]
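The ``quadratic_twist`` validation at the top of the method amounts to requiring a fundamental discriminant. A compact restatement of that test as a sketch (the helper name is made up here and is not part of the class)::

    from sage.all import ZZ

    def is_fundamental_discriminant_sketch(D):
        # D = 1, or D == 1 (mod 4) and squarefree, or D = 4*d with d squarefree and d != 1 (mod 4)
        D = ZZ(D)
        if D == 1:
            return True
        if D % 4 == 0:
            d = D // 4
            return d.is_squarefree() and d % 4 != 1
        return D % 4 == 1 and D.is_squarefree()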
def siegel_product(self, u): """ Computes the infinite product of local densities of the quadratic form for the number `u`. EXAMPLES:: sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1,1]) sage: Q.theta_series(11) 1 + 8*q + 24*q^2 + 32*q^3 + 24*q^4 + 48*q^5 + 96*q^6 + 64*q^7 + 24*q^8 + 104*q^9 + 144*q^10 + O(q^11) sage: Q.siegel_product(1) 8 sage: Q.siegel_product(2) ## This one is wrong -- expect 24, and the higher powers of 2 don't work... =( 24 sage: Q.siegel_product(3) 32 sage: Q.siegel_product(5) 48 sage: Q.siegel_product(6) 96 sage: Q.siegel_product(7) 64 sage: Q.siegel_product(9) 104 sage: Q.local_density(2,1) 1 sage: M = 4; len([v for v in mrange([M,M,M,M]) if Q(v) % M == 1]) / M^3 1 sage: M = 16; len([v for v in mrange([M,M,M,M]) if Q(v) % M == 1]) / M^3 # long time (2s on sage.math, 2014) 1 sage: Q.local_density(2,2) 3/2 sage: M = 4; len([v for v in mrange([M,M,M,M]) if Q(v) % M == 2]) / M^3 3/2 sage: M = 16; len([v for v in mrange([M,M,M,M]) if Q(v) % M == 2]) / M^3 # long time (2s on sage.math, 2014) 3/2 TESTS:: sage: [1] + [Q.siegel_product(ZZ(a)) for a in range(1,11)] == Q.theta_series(11).list() # long time (2s on sage.math, 2014) True """ ## Protect u (since it fails often if it's an just an int!) u = ZZ(u) n = self.dim() d = self.det() ## ??? Warning: This is a factor of 2^n larger than it should be! ## DIAGNOSTIC verbose("n = " + str(n)) verbose("d = " + str(d)) verbose("In siegel_product: d = ", d, "\n"); ## Product of "bad" places to omit S = 2 * d * u ## DIAGNOSTIC verbose("siegel_product Break 1. \n") verbose(" u = ", u, "\n") ## Make the odd generic factors if ((n % 2) == 1): m = (n-1) // 2 d1 = fundamental_discriminant(((-1)**m) * 2*d * u) ## Replaced d by 2d here to compensate for the determinant f = abs(d1) ## gaining an odd power of 2 by using the matrix of 2Q instead ## of the matrix of Q. ## --> Old d1 = CoreDiscriminant((mpz_class(-1)^m) * d * u); ## Make the ratio of factorials factor: [(2m)! / m!] * prod_{i=1}^m (2*i-1) factor1 = 1 for i in range(1, m+1): factor1 *= 2*i - 1 for i in range(m+1, 2*m + 1): factor1 *= i genericfactor = factor1 * ((u / f) ** m) \ * QQ(sqrt((2 ** n) * f) / (u * d)) \ * abs(QuadraticBernoulliNumber(m, d1) / bernoulli(2*m)) ## DIAGNOSTIC verbose("siegel_product Break 2. 
\n") ## Make the even generic factor if ((n % 2) == 0): m = n // 2 d1 = fundamental_discriminant(((-1)**m) * d) f = abs(d1) ## DIAGNOSTIC #cout << " mpz_class(-1)^m = " << (mpz_class(-1)^m) << " and d = " << d << endl; #cout << " f = " << f << " and d1 = " << d1 << endl; genericfactor = m / QQ(sqrt(f*d)) \ * ((u/2) ** (m-1)) * (f ** m) \ / abs(QuadraticBernoulliNumber(m, d1)) \ * (2 ** m) ## This last factor compensates for using the matrix of 2*Q ##return genericfactor ## Omit the generic factors in S and compute them separately omit = 1 include = 1 S_divisors = prime_divisors(S) ## DIAGNOSTIC #cout << "\n S is " << S << endl; #cout << " The Prime divisors of S are :"; #PrintV(S_divisors); for p in S_divisors: Q_normal = self.local_normal_form(p) ## DIAGNOSTIC verbose(" p = " + str(p) + " and its Kronecker symbol (d1/p) = (" + str(d1) + "/" + str(p) + ") is " + str(kronecker_symbol(d1, p)) + "\n") omit *= 1 / (1 - (kronecker_symbol(d1, p) / (p**m))) ## DIAGNOSTIC verbose(" omit = " + str(omit) + "\n") verbose(" Q_normal is \n" + str(Q_normal) + "\n") verbose(" Q_normal = \n" + str(Q_normal)) verbose(" p = " + str(p) + "\n") verbose(" u = " +str(u) + "\n") verbose(" include = " + str(include) + "\n") include *= Q_normal.local_density(p, u) ## DIAGNOSTIC #cout << " Including the p = " << p << " factor: " << local_density(Q_normal, p, u) << endl; ## DIAGNSOTIC verbose(" --- Exiting loop \n") #// **************** Important ******************* #// Additional fix (only included for n=4) to deal #// with the power of 2 introduced at the real place #// by working with Q instead of 2*Q. This needs to #// be done for all other n as well... #/* #if (n==4) # genericfactor = 4 * genericfactor; #*/ ## DIAGNSOTIC #cout << endl; #cout << " generic factor = " << genericfactor << endl; #cout << " omit = " << omit << endl; #cout << " include = " << include << endl; #cout << endl; ## DIAGNSOTIC #// cout << "siegel_product Break 3. " << endl; ## Return the final factor (and divide by 2 if n=2) if (n == 2): return (genericfactor * omit * include / 2) else: return (genericfactor * omit * include)
def _find_scaling_L_ratio(self): r""" This function is use to set ``_scaling``, the factor used to adjust the scalar multiple of the modular symbol. If `[0]`, the modular symbol evaluated at 0, is non-zero, we can just scale it with respect to the approximation of the L-value. It is known that the quotient is a rational number with small denominator. Otherwise we try to scale using quadratic twists. ``_scaling`` will be set to a rational non-zero multiple if we succeed and to 1 otherwise. Even if we fail we scale at least to make up the difference between the periods of the `X_0`-optimal curve and our given curve `E` in the isogeny class. EXAMPLES:: sage: m = EllipticCurve('11a1').modular_symbol(implementation="sage") sage: m._scaling 1/5 sage: m = EllipticCurve('11a2').modular_symbol(implementation="sage") sage: m._scaling 1 sage: m = EllipticCurve('11a3').modular_symbol(implementation="sage") sage: m._scaling 1/25 sage: m = EllipticCurve('37a1').modular_symbol(implementation="sage") sage: m._scaling 1 sage: m = EllipticCurve('37a1').modular_symbol() sage: m._scaling 1 sage: m = EllipticCurve('389a1').modular_symbol() sage: m._scaling 1 sage: m = EllipticCurve('389a1').modular_symbol(implementation="sage") sage: m._scaling 2 sage: m = EllipticCurve('196a1').modular_symbol(implementation="sage") sage: m._scaling 1/2 Some harder cases fail:: sage: m = EllipticCurve('121b1').modular_symbol(implementation="sage") Warning : Could not normalize the modular symbols, maybe all further results will be multiplied by -1 and a power of 2 sage: m._scaling 1 TESTS:: sage: rk0 = ['11a1', '11a2', '15a1', '27a1', '37b1'] sage: for la in rk0: # long time (3s on sage.math, 2011) ....: E = EllipticCurve(la) ....: me = E.modular_symbol(implementation="eclib") ....: ms = E.modular_symbol(implementation="sage") ....: print("{} {} {}".format(E.lseries().L_ratio()*E.real_components(), me(0), ms(0))) 1/5 1/5 1/5 1 1 1 1/4 1/4 1/4 1/3 1/3 1/3 2/3 2/3 2/3 sage: rk1 = ['37a1','43a1','53a1', '91b1','91b2','91b3'] sage: [EllipticCurve(la).modular_symbol()(0) for la in rk1] # long time (1s on sage.math, 2011) [0, 0, 0, 0, 0, 0] sage: for la in rk1: # long time (8s on sage.math, 2011) ....: E = EllipticCurve(la) ....: m = E.modular_symbol() ....: lp = E.padic_lseries(5) ....: for D in [5,17,12,8]: ....: ED = E.quadratic_twist(D) ....: md = sum([kronecker(D,u)*m(ZZ(u)/D) for u in range(D)]) ....: etaD = lp._quotient_of_periods_to_twist(D) ....: assert ED.lseries().L_ratio()*ED.real_components() * etaD == md """ E = self._E self._scaling = 1 # initial value, may be changed later. 
self._failed_to_scale = False if self._sign == 1 : at0 = self(0) if at0 != 0 : l1 = self.__lalg__(1) if at0 != l1: verbose('scale modular symbols by %s'%(l1/at0)) self._scaling = l1/at0 else : # if [0] = 0, we can still hope to scale it correctly by considering twists of E Dlist = [5,8,12,13,17,21,24,28,29, 33, 37, 40, 41, 44, 53, 56, 57, 60, 61, 65, 69, 73, 76, 77, 85, 88, 89, 92, 93, 97] # a list of positive fundamental discriminants j = 0 at0 = 0 # computes [0]+ for the twist of E by D until one value is non-zero while j < 30 and at0 == 0 : D = Dlist[j] # the following line checks if the twist of the newform of E by D is a newform # this is to avoid that we 'twist back' if all( valuation(E.conductor(),ell)<= valuation(D,ell) for ell in prime_divisors(D) ) : at0 = sum([kronecker_symbol(D,u) * self(ZZ(u)/D) for u in range(1,abs(D))]) j += 1 if j == 30 and at0 == 0: # curves like "121b1", "225a1", "225e1", "256a1", "256b1", "289a1", "361a1", "400a1", "400c1", "400h1", "441b1", "441c1", "441d1", "441f1 .. will arrive here print("Warning : Could not normalize the modular symbols, maybe all further results will be multiplied by -1 and a power of 2") self._failed_to_scale = True else : l1 = self.__lalg__(D) if at0 != l1: verbose('scale modular symbols by %s found at D=%s '%(l1/at0,D), level=2) self._scaling = l1/at0 else : # that is when sign = -1 Dlist = [-3,-4,-7,-8,-11,-15,-19,-20,-23,-24, -31, -35, -39, -40, -43, -47, -51, -52, -55, -56, -59, -67, -68, -71, -79, -83, -84, -87, -88, -91] # a list of negative fundamental discriminants j = 0 at0 = 0 while j < 30 and at0 == 0 : # computes [0]+ for the twist of E by D until one value is non-zero D = Dlist[j] if all( valuation(E.conductor(),ell)<= valuation(D,ell) for ell in prime_divisors(D) ) : at0 = - sum([kronecker_symbol(D,u) * self(ZZ(u)/D) for u in range(1,abs(D))]) j += 1 if j == 30 and at0 == 0: # no more hope for a normalization print("Warning : Could not normalize the modular symbols, maybe all further results will be multiplied by -1 and a power of 2") self._failed_to_scale = True else : l1 = self.__lalg__(D) if at0 != l1: verbose('scale modular symbols by %s'%(l1/at0)) self._scaling = l1/at0
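The twisted value accumulated in the `D`-loop above is a character-weighted sum of modular-symbol values. A standalone sketch for one curve and one positive fundamental discriminant, using only calls that appear in the method::

    from sage.all import EllipticCurve, kronecker_symbol, ZZ

    E = EllipticCurve('37a1')
    m = E.modular_symbol()
    D = 5
    # the quantity called `at0` above, for the twist of E by D
    at0 = sum(kronecker_symbol(D, u) * m(ZZ(u) / D) for u in range(1, abs(D)))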
def basis(self, reduce=True): r""" Produce a basis for the free abelian group of eta-products of level N (under multiplication), attempting to find basis vectors of the smallest possible degree. INPUT: - ``reduce`` - a boolean (default True) indicating whether or not to apply LLL-reduction to the calculated basis EXAMPLES:: sage: EtaGroup(5).basis() [Eta product of level 5 : (eta_1)^6 (eta_5)^-6] sage: EtaGroup(12).basis() [Eta product of level 12 : (eta_1)^2 (eta_2)^1 (eta_3)^2 (eta_4)^-1 (eta_6)^-7 (eta_12)^3, Eta product of level 12 : (eta_1)^-4 (eta_2)^2 (eta_3)^4 (eta_6)^-2, Eta product of level 12 : (eta_1)^-1 (eta_2)^3 (eta_3)^3 (eta_4)^-2 (eta_6)^-9 (eta_12)^6, Eta product of level 12 : (eta_1)^1 (eta_2)^-1 (eta_3)^-3 (eta_4)^-2 (eta_6)^7 (eta_12)^-2, Eta product of level 12 : (eta_1)^-6 (eta_2)^9 (eta_3)^2 (eta_4)^-3 (eta_6)^-3 (eta_12)^1] sage: EtaGroup(12).basis(reduce=False) # much bigger coefficients [Eta product of level 12 : (eta_2)^24 (eta_12)^-24, Eta product of level 12 : (eta_1)^-336 (eta_2)^576 (eta_3)^696 (eta_4)^-216 (eta_6)^-576 (eta_12)^-144, Eta product of level 12 : (eta_1)^-8 (eta_2)^-2 (eta_6)^2 (eta_12)^8, Eta product of level 12 : (eta_1)^1 (eta_2)^9 (eta_3)^13 (eta_4)^-4 (eta_6)^-15 (eta_12)^-4, Eta product of level 12 : (eta_1)^15 (eta_2)^-24 (eta_3)^-29 (eta_4)^9 (eta_6)^24 (eta_12)^5] ALGORITHM: An eta product of level `N` is uniquely determined by the integers `r_d` for `d | N` with `d < N`, since `\sum_{d | N} r_d = 0`. The valid `r_d` are those that satisfy two congruences modulo 24, and one congruence modulo 2 for every prime divisor of N. We beef up the congruences modulo 2 to congruences modulo 24 by multiplying by 12. To calculate the kernel of the ensuing map `\ZZ^m \to (\ZZ/24\ZZ)^n` we lift it arbitrarily to an integer matrix and calculate its Smith normal form. This gives a basis for the lattice. This lattice typically contains "large" elements, so by default we pass it to the reduce_basis() function which performs LLL-reduction to give a more manageable basis. """ from six.moves import range N = self.level() divs = divisors(N)[:-1] s = len(divs) primedivs = prime_divisors(N) rows = [] for i in range(s): # generate a row of relation matrix row = [ Mod(divs[i], 24) - Mod(N, 24), Mod(N / divs[i], 24) - Mod(1, 24) ] for p in primedivs: row.append(Mod(12 * (N / divs[i]).valuation(p), 24)) rows.append(row) M = matrix(IntegerModRing(24), rows) Mlift = M.change_ring(ZZ) # now we compute elementary factors of Mlift S, U, V = Mlift.smith_form() good_vects = [] for i in range(U.nrows()): vect = U.row(i) nf = (i < S.ncols() and S[i, i]) or 0 good_vects.append((vect * 24 / gcd(nf, 24)).list()) for v in good_vects: v.append(-sum([r for r in v])) dicts = [] for v in good_vects: dicts.append({}) for i in range(s): dicts[-1][divs[i]] = v[i] dicts[-1][N] = v[-1] if reduce: return self.reduce_basis([self(d) for d in dicts]) else: return [self(d) for d in dicts]
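## Hedged illustration (not from the original source) of the kernel computation described
## in the ALGORITHM block above, shrunk to a toy example: lift the map to ZZ, take the
## Smith normal form, and rescale the rows of U so that they land in the kernel modulo 24.
## The function name is hypothetical; ``matrix``, ``ZZ`` and ``gcd`` are the names already
## used in ``basis`` above.
def _kernel_mod_24_toy_example():
    M = matrix(ZZ, [[4], [6]])             # toy lift of a map ZZ^2 -> ZZ/24ZZ
    S, U, V = M.smith_form()               # S = U * M * V with S diagonal
    vects = []
    for i in range(U.nrows()):
        nf = S[i, i] if i < S.ncols() else 0
        # rescale so that the row maps into 24*ZZ, i.e. lies in the kernel mod 24
        vects.append((U.row(i) * 24 / gcd(nf, 24)).list())
    return vects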
def old_submodule(self, p=None): """ Returns the old or p-old submodule of self, i.e. the sum of the images of the degeneracy maps from level `N/p` (for the given prime `p`, or for all primes `p` dividing `N` if `p` is not given). INPUT: - ``p`` - (default: None); if not None, return only the p-old submodule. OUTPUT: the old or p-old submodule of self EXAMPLES:: sage: m = ModularSymbols(33); m.rank() 9 sage: m.old_submodule().rank() 7 sage: m.old_submodule(3).rank() 6 sage: m.new_submodule(11).rank() 8 :: sage: e = DirichletGroup(16)([-1, 1]) sage: M = ModularSymbols(e, 3, sign=1); M Modular Symbols space of dimension 4 and level 16, weight 3, character [-1, 1], sign 1, over Rational Field sage: M.old_submodule() Modular Symbols subspace of dimension 3 of Modular Symbols space of dimension 4 and level 16, weight 3, character [-1, 1], sign 1, over Rational Field Illustrate that :trac:`10664` is fixed:: sage: ModularSymbols(DirichletGroup(42)[7], 6, sign=1).old_subspace(3) Modular Symbols subspace of dimension 0 of Modular Symbols space of dimension 40 and level 42, weight 6, character [-1, -1], sign 1, over Rational Field """ try: if self.__is_old[p]: return self except AttributeError: self.__is_old = {} except KeyError: pass if self.rank() == 0: self.__is_old[p] = True return self try: return self.__old_submodule[p] except AttributeError: self.__old_submodule = {} except KeyError: pass # Construct the degeneracy map d. N = self.level() d = None eps = self.character() if eps is None: f = 1 else: f = eps.conductor() if p is None: D = arith.prime_divisors(N) else: if N % p != 0: raise ValueError("p must divide the level.") D = [p] for q in D: NN = N // q if NN % f == 0: M = self.hecke_module_of_level(NN) # Here it is vital to pass self as an argument to # degeneracy_map, because M and the level N don't uniquely # determine self (e.g. the degeneracy map from level 1 to level # N could go to Gamma0(N), Gamma1(N) or anything in between) d1 = M.degeneracy_map(self, 1).matrix() if d is None: d = d1 else: d = d.stack(d1) d = d.stack(M.degeneracy_map(self, q).matrix()) #end if #end for if d is None: os = self.zero_submodule() else: os = self.submodule(d.image(), check=False) self.__is_old[p] = (os == self) os.__is_old = {p: True} os._is_full_hecke_module = True self.__old_submodule[p] = os return os
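## Hedged sketch (not part of the original source): the heart of ``old_submodule`` is
## "stack the degeneracy matrices on top of each other, then take the image of the
## stacked map", since the image of the stacked matrix is the sum of the individual
## images.  A toy version over ZZ; the function name and the matrices are purely
## illustrative.
def _sum_of_images_via_stack_sketch():
    from sage.matrix.constructor import matrix
    from sage.rings.integer_ring import ZZ
    d1 = matrix(ZZ, 2, 3, [1, 0, 0, 0, 1, 0])   # stand-in for one degeneracy map
    d2 = matrix(ZZ, 2, 3, [0, 1, 0, 0, 0, 1])   # stand-in for another degeneracy map
    d = d1.stack(d2)
    return d.image()                            # = image(d1) + image(d2), the "old" part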
def siegel_product(self, u): """ Computes the infinite product of local densities of the quadratic form for the number `u`. EXAMPLES:: sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1,1]) sage: Q.theta_series(11) 1 + 8*q + 24*q^2 + 32*q^3 + 24*q^4 + 48*q^5 + 96*q^6 + 64*q^7 + 24*q^8 + 104*q^9 + 144*q^10 + O(q^11) sage: Q.siegel_product(1) 8 sage: Q.siegel_product(2) ## This one is wrong -- expect 24, and the higher powers of 2 don't work... =( 24 sage: Q.siegel_product(3) 32 sage: Q.siegel_product(5) 48 sage: Q.siegel_product(6) 96 sage: Q.siegel_product(7) 64 sage: Q.siegel_product(9) 104 sage: Q.local_density(2,1) 1 sage: M = 4; len([v for v in mrange([M,M,M,M]) if Q(v) % M == 1]) / M^3 1 sage: M = 16; len([v for v in mrange([M,M,M,M]) if Q(v) % M == 1]) / M^3 # long time (2s on sage.math, 2014) 1 sage: Q.local_density(2,2) 3/2 sage: M = 4; len([v for v in mrange([M,M,M,M]) if Q(v) % M == 2]) / M^3 3/2 sage: M = 16; len([v for v in mrange([M,M,M,M]) if Q(v) % M == 2]) / M^3 # long time (2s on sage.math, 2014) 3/2 TESTS:: sage: [1] + [Q.siegel_product(ZZ(a)) for a in range(1,11)] == Q.theta_series(11).list() # long time (2s on sage.math, 2014) True """ ## Protect u (since it fails often if it's an just an int!) u = ZZ(u) n = self.dim() d = self.det( ) ## ??? Warning: This is a factor of 2^n larger than it should be! # DIAGNOSTIC verbose("n = " + str(n)) verbose("d = " + str(d)) verbose("In siegel_product: d = " + str(d) + "\n") ## Product of "bad" places to omit S = 2 * d * u ## DIAGNOSTIC verbose("siegel_product Break 1. \n") verbose(" u = " + str(u) + "\n") ## Make the odd generic factors if ((n % 2) == 1): m = (n - 1) // 2 d1 = fundamental_discriminant( ((-1)**m) * 2 * d * u) ## Replaced d by 2d here to compensate for the determinant f = abs( d1) ## gaining an odd power of 2 by using the matrix of 2Q instead ## of the matrix of Q. ## --> Old d1 = CoreDiscriminant((mpz_class(-1)^m) * d * u); ## Make the ratio of factorials factor: [(2m)! / m!] * prod_{i=1}^m (2*i-1) factor1 = 1 for i in range(1, m + 1): factor1 *= 2 * i - 1 for i in range(m + 1, 2 * m + 1): factor1 *= i genericfactor = factor1 * ((u / f) ** m) \ * QQ(sqrt((2 ** n) * f) / (u * d)) \ * abs(QuadraticBernoulliNumber(m, d1) / bernoulli(2*m)) ## DIAGNOSTIC verbose("siegel_product Break 2. 
\n") ## Make the even generic factor if ((n % 2) == 0): m = n // 2 d1 = fundamental_discriminant(((-1)**m) * d) f = abs(d1) ## DIAGNOSTIC #cout << " mpz_class(-1)^m = " << (mpz_class(-1)^m) << " and d = " << d << endl; #cout << " f = " << f << " and d1 = " << d1 << endl; genericfactor = m / QQ(sqrt(f*d)) \ * ((u/2) ** (m-1)) * (f ** m) \ / abs(QuadraticBernoulliNumber(m, d1)) \ * (2 ** m) ## This last factor compensates for using the matrix of 2*Q ##return genericfactor ## Omit the generic factors in S and compute them separately omit = 1 include = 1 S_divisors = prime_divisors(S) ## DIAGNOSTIC #cout << "\n S is " << S << endl; #cout << " The Prime divisors of S are :"; #PrintV(S_divisors); for p in S_divisors: Q_normal = self.local_normal_form(p) ## DIAGNOSTIC verbose(" p = " + str(p) + " and its Kronecker symbol (d1/p) = (" + str(d1) + "/" + str(p) + ") is " + str(kronecker_symbol(d1, p)) + "\n") omit *= 1 / (1 - (kronecker_symbol(d1, p) / (p**m))) ## DIAGNOSTIC verbose(" omit = " + str(omit) + "\n") verbose(" Q_normal is \n" + str(Q_normal) + "\n") verbose(" Q_normal = \n" + str(Q_normal)) verbose(" p = " + str(p) + "\n") verbose(" u = " + str(u) + "\n") verbose(" include = " + str(include) + "\n") include *= Q_normal.local_density(p, u) ## DIAGNOSTIC #cout << " Including the p = " << p << " factor: " << local_density(Q_normal, p, u) << endl; ## DIAGNOSTIC verbose(" --- Exiting loop \n") #// **************** Important ******************* #// Additional fix (only included for n=4) to deal #// with the power of 2 introduced at the real place #// by working with Q instead of 2*Q. This needs to #// be done for all other n as well... #/* #if (n==4) # genericfactor = 4 * genericfactor; #*/ ## DIAGNOSTIC #cout << endl; #cout << " generic factor = " << genericfactor << endl; #cout << " omit = " << omit << endl; #cout << " include = " << include << endl; #cout << endl; ## DIAGNOSTIC #// cout << "siegel_product Break 3. " << endl; ## Return the final factor (and divide by 2 if n=2) if n == 2: return genericfactor * omit * include / 2 else: return genericfactor * omit * include
def new_submodule(self, p=None): """ Returns the new or p-new submodule of self. INPUT: - ``p`` - (default: None); if not None, return only the p-new submodule. OUTPUT: the new or p-new submodule of self, i.e. the intersection of the kernel of the degeneracy lowering maps to level `N/p` (for the given prime `p`, or for all prime divisors of `N` if `p` is not given). If self is cuspidal this is a Hecke-invariant complement of the corresponding old submodule, but this may break down on Eisenstein subspaces (see the amusing example in William Stein's book of a form which is new and old at the same time). EXAMPLES:: sage: m = ModularSymbols(33); m.rank() 9 sage: m.new_submodule().rank() 3 sage: m.new_submodule(3).rank() 4 sage: m.new_submodule(11).rank() 8 """ try: if self.__is_new[p]: return self except AttributeError: self.__is_new = {} except KeyError: pass if self.rank() == 0: self.__is_new[p] = True return self try: return self.__new_submodule[p] except AttributeError: self.__new_submodule = {} except KeyError: pass # Construct the degeneracy map d. N = self.level() d = None eps = self.character() if eps is None: f = 1 else: f = eps.conductor() if p is None: D = arith.prime_divisors(N) else: if N % p != 0: raise ValueError("p must divide the level.") D = [p] for q in D: # Here we are only using degeneracy *lowering* maps, so it is fine # to be careless and pass an integer for the level. One needs to be # a bit more careful with degeneracy *raising* maps for the Gamma1 # and GammaH cases. if ((N // q) % f) == 0: NN = N // q d1 = self.degeneracy_map(NN, 1).matrix() if d is None: d = d1 else: d = d.augment(d1) d = d.augment(self.degeneracy_map(NN, q).matrix()) #end if #end for if d is None or d == 0: self.__is_new[p] = True return self else: self.__is_new[p] = False ns = self.submodule(d.kernel(), check=False) ns.__is_new = {p: True} ns._is_full_hecke_module = True self.__new_submodule[p] = ns return ns
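## Hedged sketch (not part of the original source): dually to ``old_submodule``, the core
## of ``new_submodule`` is "augment the degeneracy-lowering matrices side by side, then
## take the kernel", since the (left) kernel of the augmented matrix is the intersection
## of the individual kernels.  A toy version over ZZ; names and matrices are illustrative.
def _intersection_of_kernels_via_augment_sketch():
    from sage.matrix.constructor import matrix
    from sage.rings.integer_ring import ZZ
    d1 = matrix(ZZ, 3, 1, [1, 0, 0])    # stand-in for lowering to level N/q via alpha_1
    d2 = matrix(ZZ, 3, 1, [0, 0, 1])    # stand-in for lowering to level N/q via alpha_q
    d = d1.augment(d2)
    return d.kernel()                   # = ker(d1) meet ker(d2), the "new" part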
def prove_BSD(E, verbosity=0, two_desc='mwrank', proof=None, secs_hi=5, return_BSD=False): r""" Attempts to prove the Birch and Swinnerton-Dyer conjectural formula for `E`, returning a list of primes `p` for which this function fails to prove BSD(E,p). Here, BSD(E,p) is the statement: "the Birch and Swinnerton-Dyer formula holds up to a rational number coprime to `p`." INPUT: - ``E`` - an elliptic curve - ``verbosity`` - int, how much information about the proof to print. - 0 - print nothing - 1 - print sketch of proof - 2 - print information about remaining primes - ``two_desc`` - string (default ``'mwrank'``), what to use for the two-descent. Options are ``'mwrank', 'simon', 'sage'`` - ``proof`` - bool or None (default: None, see proof.elliptic_curve or sage.structure.proof). If False, this function just immediately returns the empty list. - ``secs_hi`` - maximum number of seconds to try to compute the Heegner index before switching over to trying to compute the Heegner index bound. (Rank 0 only!) - ``return_BSD`` - bool (default: False) whether to return an object which contains information to reconstruct a proof NOTE: When printing verbose output, phrases such as "by Mazur" are referring to the following list of papers: REFERENCES: .. [Cha] B. Cha. Vanishing of some cohomology goups and bounds for the Shafarevich-Tate groups of elliptic curves. J. Number Theory, 111:154- 178, 2005. .. [Jetchev] D. Jetchev. Global divisibility of Heegner points and Tamagawa numbers. Compos. Math. 144 (2008), no. 4, 811--826. .. [Kato] K. Kato. p-adic Hodge theory and values of zeta functions of modular forms. Astérisque, (295):ix, 117-290, 2004. .. [Kolyvagin] V. A. Kolyvagin. On the structure of Shafarevich-Tate groups. Algebraic geometry, 94--121, Lecture Notes in Math., 1479, Springer, Berlin, 1991. .. [LawsonWuthrich] T. Lawson and C. Wuthrich, Vanishing of some Galois cohomology groups for elliptic curves, http://arxiv.org/abs/1505.02940 .. [LumStein] A. Lum, W. Stein. Verification of the Birch and Swinnerton-Dyer Conjecture for Elliptic Curves with Complex Multiplication (unpublished) .. [Mazur] B. Mazur. Modular curves and the Eisenstein ideal. Inst. Hautes Études Sci. Publ. Math. No. 47 (1977), 33--186 (1978). .. [Rubin] K. Rubin. The "main conjectures" of Iwasawa theory for imaginary quadratic fields. Invent. Math. 103 (1991), no. 1, 25--68. .. [SteinWuthrich] W. Stein and C. Wuthrich, Algorithms for the Arithmetic of Elliptic Curves using Iwasawa Theory Mathematics of Computation 82 (2013), 1757-1792. .. [SteinEtAl] G. Grigorov, A. Jorza, S. Patrikis, W. Stein, C. Tarniţǎ. Computational verification of the Birch and Swinnerton-Dyer conjecture for individual elliptic curves. Math. Comp. 78 (2009), no. 268, 2397--2425. EXAMPLES:: sage: EllipticCurve('11a').prove_BSD(verbosity=2) p = 2: True by 2-descent True for p not in {2, 5} by Kolyvagin. Kolyvagin's bound for p = 5 applies by Lawson-Wuthrich True for p = 5 by Kolyvagin bound [] sage: EllipticCurve('14a').prove_BSD(verbosity=2) p = 2: True by 2-descent True for p not in {2, 3} by Kolyvagin. Kolyvagin's bound for p = 3 applies by Lawson-Wuthrich True for p = 3 by Kolyvagin bound [] sage: E = EllipticCurve("20a1") sage: E.prove_BSD(verbosity=2) p = 2: True by 2-descent True for p not in {2, 3} by Kolyvagin. Kato further implies that #Sha[3] is trivial. [] sage: E = EllipticCurve("50b1") sage: E.prove_BSD(verbosity=2) p = 2: True by 2-descent True for p not in {2, 3, 5} by Kolyvagin. 
Kolyvagin's bound for p = 3 applies by Lawson-Wuthrich True for p = 3 by Kolyvagin bound Remaining primes: p = 5: reducible, not surjective, additive, divides a Tamagawa number (no bounds found) ord_p(#Sha_an) = 0 [5] sage: E.prove_BSD(two_desc='simon') [5] A rank two curve:: sage: E = EllipticCurve('389a') We know nothing with proof=True:: sage: E.prove_BSD() Set of all prime numbers: 2, 3, 5, 7, ... We (think we) know everything with proof=False:: sage: E.prove_BSD(proof=False) [] A curve of rank 0 and prime conductor:: sage: E = EllipticCurve('19a') sage: E.prove_BSD(verbosity=2) p = 2: True by 2-descent True for p not in {2, 3} by Kolyvagin. Kolyvagin's bound for p = 3 applies by Lawson-Wuthrich True for p = 3 by Kolyvagin bound [] sage: E = EllipticCurve('37a') sage: E.rank() 1 sage: E._EllipticCurve_rational_field__rank {True: 1} sage: E.analytic_rank = lambda : 0 sage: E.prove_BSD() Traceback (most recent call last): ... RuntimeError: It seems that the rank conjecture does not hold for this curve (Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field)! This may be a counterexample to BSD, but is more likely a bug. We test the consistency check for the 2-part of Sha:: sage: E = EllipticCurve('37a') sage: S = E.sha(); S Tate-Shafarevich group for the Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field sage: def foo(use_database): ... return 4 sage: S.an = foo sage: E.prove_BSD() Traceback (most recent call last): ... RuntimeError: Apparent contradiction: 0 <= rank(sha[2]) <= 0, but ord_2(sha_an) = 2 An example with a Tamagawa number at 5:: sage: E = EllipticCurve('123a1') sage: E.prove_BSD(verbosity=2) p = 2: True by 2-descent True for p not in {2, 5} by Kolyvagin. Remaining primes: p = 5: reducible, not surjective, good ordinary, divides a Tamagawa number (no bounds found) ord_p(#Sha_an) = 0 [5] A curve for which 3 divides the order of the Tate-Shafarevich group:: sage: E = EllipticCurve('681b') sage: E.prove_BSD(verbosity=2) # long time p = 2: True by 2-descent... True for p not in {2, 3} by Kolyvagin.... Remaining primes: p = 3: irreducible, surjective, non-split multiplicative (0 <= ord_p <= 2) ord_p(#Sha_an) = 2 [3] A curve for which we need to use ``heegner_index_bound``:: sage: E = EllipticCurve('198b') sage: E.prove_BSD(verbosity=1, secs_hi=1) p = 2: True by 2-descent True for p not in {2, 3} by Kolyvagin. [3] The ``return_BSD`` option gives an object with detailed information about the proof:: sage: E = EllipticCurve('26b') sage: B = E.prove_BSD(return_BSD=True) sage: B.two_tor_rk 0 sage: B.N 26 sage: B.gens [] sage: B.primes [] sage: B.heegner_indexes {-23: 2} TESTS: This was fixed by trac #8184 and #7575:: sage: EllipticCurve('438e1').prove_BSD(verbosity=1) p = 2: True by 2-descent... True for p not in {2} by Kolyvagin. [] :: sage: E = EllipticCurve('960d1') sage: E.prove_BSD(verbosity=1) # long time (4s on sage.math, 2011) p = 2: True by 2-descent True for p not in {2} by Kolyvagin. [] """ if proof is None: from sage.structure.proof.proof import get_flag proof = get_flag(proof, "elliptic_curve") else: proof = bool(proof) if not proof: return [] from copy import copy BSD = BSD_data() # We replace this curve by the optimal curve, which we can do since # truth of BSD(E,p) is invariant under isogeny. BSD.curve = E.optimal_curve() if BSD.curve.has_cm(): # ensure that CM is by a maximal order non_max_j_invs = [-12288000, 54000, 287496, 16581375] if BSD.curve.j_invariant( ) in non_max_j_invs: # is this possible for optimal curves? 
if verbosity > 0: print('CM by non maximal order: switching curves') for E in BSD.curve.isogeny_class(): if E.j_invariant() not in non_max_j_invs: BSD.curve = E break BSD.update() galrep = BSD.curve.galois_representation() if two_desc == 'mwrank': M = mwrank_two_descent_work(BSD.curve, BSD.two_tor_rk) elif two_desc == 'simon': M = simon_two_descent_work(BSD.curve, BSD.two_tor_rk) elif two_desc == 'sage': M = native_two_isogeny_descent_work(BSD.curve, BSD.two_tor_rk) else: raise NotImplementedError() rank_lower_bd, rank_upper_bd, sha2_lower_bd, sha2_upper_bd, gens = M assert sha2_lower_bd <= sha2_upper_bd if gens is not None: gens = BSD.curve.saturation(gens)[0] if rank_lower_bd > rank_upper_bd: raise RuntimeError("Apparent contradiction: %d <= rank <= %d." % (rank_lower_bd, rank_upper_bd)) BSD.two_selmer_rank = rank_upper_bd + sha2_lower_bd + BSD.two_tor_rk if sha2_upper_bd == sha2_lower_bd: BSD.rank = rank_lower_bd BSD.bounds[2] = (sha2_lower_bd, sha2_upper_bd) else: BSD.rank = BSD.curve.rank(use_database=True) sha2_upper_bd -= (BSD.rank - rank_lower_bd) BSD.bounds[2] = (sha2_lower_bd, sha2_upper_bd) if verbosity > 0: print("Unable to compute the rank exactly -- used database.") if rank_lower_bd > 1: # We do not know BSD(E,p) for even a single p, since it's # an open problem to show that L^r(E,1)/(Reg*Omega) is # rational for any curve with r >= 2. from sage.sets.all import Primes BSD.primes = Primes() if return_BSD: BSD.rank = rank_lower_bd return BSD return BSD.primes if (BSD.sha_an.ord(2) == 0) != (BSD.bounds[2][1] == 0): raise RuntimeError( "Apparent contradiction: %d <= rank(sha[2]) <= %d, but ord_2(sha_an) = %d" % (sha2_lower_bd, sha2_upper_bd, BSD.sha_an.ord(2))) if BSD.bounds[2][0] == BSD.sha_an.ord(2) and BSD.sha_an.ord( 2) == BSD.bounds[2][1]: if verbosity > 0: print('p = 2: True by 2-descent') BSD.primes = [] BSD.bounds.pop(2) BSD.proof[2] = ['2-descent'] else: BSD.primes = [2] BSD.proof[2] = [('2-descent', ) + BSD.bounds[2]] if len(gens) > rank_lower_bd or \ rank_lower_bd > rank_upper_bd: raise RuntimeError("Something went wrong with 2-descent.") if BSD.rank != len(gens): if BSD.rank != len( BSD.curve._EllipticCurve_rational_field__gens[True]): raise RuntimeError("Could not get generators") gens = BSD.curve._EllipticCurve_rational_field__gens[True] BSD.gens = [BSD.curve.point(x, check=True) for x in gens] if BSD.rank != BSD.curve.analytic_rank(): raise RuntimeError( "It seems that the rank conjecture does not hold for this curve (%s)! This may be a counterexample to BSD, but is more likely a bug." % (BSD.curve))
# reduce set of remaining primes to a finite set import signal kolyvagin_primes = [] heegner_index = None if BSD.rank == 0: for D in BSD.curve.heegner_discriminants_list(10): max_height = max(13, BSD.curve.quadratic_twist(D).CPS_height_bound()) heegner_primes = -1 while heegner_primes == -1: if max_height > 21: break heegner_primes, _, exact = BSD.curve.heegner_index_bound( D, max_height=max_height) max_height += 1 if isinstance(heegner_primes, list): break if not isinstance(heegner_primes, list): raise RuntimeError( "Tried 10 Heegner discriminants, and heegner_index_bound failed each time." ) if exact is not False: heegner_index = exact BSD.heegner_indexes[D] = exact else: BSD.heegner_index_upper_bound[D] = max(heegner_primes + [1]) if 2 in heegner_primes: heegner_primes.remove(2) else: # rank 1 for D in BSD.curve.heegner_discriminants_list(10): I = BSD.curve.heegner_index(D) J = I.is_int() if J[0] and J[1] > 0: I = J[1] else: J = (2 * I).is_int() if J[0] and J[1] > 0: I = J[1] else: continue heegner_index = I BSD.heegner_indexes[D] = I break heegner_primes = [ p for p in arith.prime_divisors(heegner_index) if p != 2 ]
assert BSD.sha_an in ZZ and BSD.sha_an > 0 if BSD.curve.has_cm(): if BSD.curve.analytic_rank() == 0: if verbosity > 0: print(' p >= 5: true by Rubin') BSD.primes.append(3) else: K = rings.QuadraticField(BSD.curve.cm_discriminant(), 'a') D_K = K.disc() D_E = BSD.curve.discriminant() if len(K.factor(3)) == 1: # 3 does not split in K BSD.primes.append(3) for p in arith.prime_divisors(D_K): if p >= 5: BSD.primes.append(p) for p in arith.prime_divisors(D_E): if p >= 5 and D_K % p and len( K.factor(p)) == 1: # p is inert in K BSD.primes.append(p) for p in heegner_primes: if p >= 5 and D_E % p != 0 and D_K % p != 0 and len( K.factor(p)) == 1: # p is good for E and inert in K kolyvagin_primes.append(p) for p in arith.prime_divisors(BSD.sha_an): if p >= 5 and D_K % p != 0 and len(K.factor(p)) == 1: if BSD.curve.is_good(p): if verbosity > 2 and p in heegner_primes and heegner_index is None: print('ALERT: Prime p (%d) >= 5 dividing sha_an, good for E, inert in K, in heegner_primes, should not divide the actual Heegner index' % p) # Note that the following check is not entirely # exhaustive, in case there is a p not dividing # the Heegner index in heegner_primes, # for which only an outer bound was computed if p not in heegner_primes: raise RuntimeError( "p = %d divides sha_an, is of good reduction for E, inert in K, and does not divide the Heegner index. This may be a counterexample to BSD, but is more likely a bug. %s" % (p, BSD.curve)) if verbosity > 0: print('True for p not in {%s} by Kolyvagin (via Stein & Lum -- unpublished) and Rubin.' % str( list(set(BSD.primes).union(set(kolyvagin_primes))))[1:-1]) BSD.proof['finite'] = copy(BSD.primes) else: # no CM # do some tricks to get to a finite set without calling bound_kolyvagin BSD.primes += [p for p in galrep.non_surjective() if p != 2] for p in heegner_primes: if p not in BSD.primes: BSD.primes.append(p) for p in arith.prime_divisors(BSD.sha_an): if p not in BSD.primes and p != 2: BSD.primes.append(p) if verbosity > 0: s = str(BSD.primes)[1:-1] if 2 not in BSD.primes: if len(s) == 0: s = '2' else: s = '2, ' + s print('True for p not in {' + s + '} by Kolyvagin.') BSD.proof['finite'] = copy(BSD.primes)
primes_to_remove = [] for p in BSD.primes: if p == 2: continue if galrep.is_surjective( p) and not BSD.curve.has_additive_reduction(p): if BSD.curve.has_nonsplit_multiplicative_reduction(p): if BSD.rank > 0: continue if p == 3: if (not (BSD.curve.is_ordinary(p) and BSD.curve.is_good(p)) ) and (not BSD.curve. has_split_multiplicative_reduction(p)): continue if BSD.rank > 0: continue if verbosity > 1: print(' p = %d: Trying p_primary_bound' % p) p_bound = BSD.Sha.p_primary_bound(p) if p in BSD.proof: BSD.proof[p].append(('Stein-Wuthrich', p_bound)) else: BSD.proof[p] = [('Stein-Wuthrich', p_bound)] if BSD.sha_an.ord(p) == 0 and p_bound == 0: if verbosity > 0: print('True for p=%d by Stein-Wuthrich.' % p) primes_to_remove.append(p) else: if p in BSD.bounds: BSD.bounds[p] = (BSD.bounds[p][0], min(BSD.bounds[p][1], p_bound)) else: BSD.bounds[p] = (0, p_bound) print('Analytic %d-rank is ' % p + str(BSD.sha_an.ord( p)) + ', actual %d-rank is at most %d.' % (p, p_bound)) print(' by Stein-Wuthrich.\n') for p in primes_to_remove: BSD.primes.remove(p) kolyvagin_primes = [] for p in BSD.primes: if p == 2: continue if galrep.is_surjective(p): kolyvagin_primes.append(p) for p in kolyvagin_primes: BSD.primes.remove(p)
# apply other hypotheses which imply Kolyvagin's bound holds bounded_primes = [] D_K = rings.QuadraticField(D, 'a').disc() # Cha's hypothesis for p in BSD.primes: if p == 2: continue if D_K % p != 0 and BSD.N % (p**2) != 0 and galrep.is_irreducible(p): if verbosity > 0: print('Kolyvagin\'s bound for p = %d applies by Cha.' % p) if p in BSD.proof: BSD.proof[p].append('Cha') else: BSD.proof[p] = ['Cha'] kolyvagin_primes.append(p) # Stein et al replaced for p in BSD.primes: # the lemma about the vanishing of H^1 is false in Stein et al for p=5 and 11 # here is the correction from Lawson-Wuthrich. Especially Theorem 14 in # [LawsonWuthrich] above. if p in kolyvagin_primes or p == 2 or D_K % p == 0: continue crit_lw = False if p > 11 or p == 7: crit_lw = True elif p == 11: if BSD.N != 121 or BSD.curve.label() != "121c2": crit_lw = True elif galrep.is_irreducible(p): crit_lw = True else: phis = BSD.curve.isogenies_prime_degree(p) if len(phis) != 1: crit_lw = True else: C = phis[0].codomain() if p == 3: if BSD.curve.torsion_order() % p != 0 and C.torsion_order( ) % p != 0: crit_lw = True else: # p == 5 Et = BSD.curve.quadratic_twist(5) if Et.torsion_order() % p != 0 and C.torsion_order( ) % p != 0: crit_lw = True if crit_lw: if verbosity > 0: print( 'Kolyvagin\'s bound for p = %d applies by Lawson-Wuthrich' % p) kolyvagin_primes.append(p) if p in BSD.proof: BSD.proof[p].append('Lawson-Wuthrich') else: BSD.proof[p] = ['Lawson-Wuthrich'] for p in kolyvagin_primes: if p in BSD.primes: BSD.primes.remove(p)
# apply Kolyvagin's bound primes_to_remove = [] for p in kolyvagin_primes: if p == 2: continue if p not in heegner_primes: ord_p_bound = 0 elif heegner_index is not None: # p must divide heegner_index ord_p_bound = 2 * heegner_index.ord(p) # Here Jetchev's results apply. m_max = max([ BSD.curve.tamagawa_number(q).ord(p) for q in BSD.N.prime_divisors() ]) if m_max > 0: if verbosity > 0: print('Jetchev\'s results apply (at p = %d) with m_max =' % p, m_max) if p in BSD.proof: BSD.proof[p].append(('Jetchev', m_max)) else: BSD.proof[p] = [('Jetchev', m_max)] ord_p_bound -= 2 * m_max else: # Heegner index is None for D in BSD.heegner_index_upper_bound: M = BSD.heegner_index_upper_bound[D] ord_p_bound = 0 while p**(ord_p_bound + 1) <= M**2: ord_p_bound += 1 # now ord_p_bound is a bound on I_K ord_p_bound *= 2 # by Kolyvagin, now ord_p_bound is a bound on #Sha break if p in BSD.proof: BSD.proof[p].append(('Kolyvagin', ord_p_bound)) else: BSD.proof[p] = [('Kolyvagin', ord_p_bound)] if BSD.sha_an.ord(p) == 0 and ord_p_bound == 0: if verbosity > 0: print('True for p = %d by Kolyvagin bound' % p) primes_to_remove.append(p) elif BSD.sha_an.ord(p) > ord_p_bound: raise RuntimeError( "p = %d: ord_p_bound == %d, but sha_an.ord(p) == %d. This appears to be a counterexample to BSD, but is more likely a bug." % (p, ord_p_bound, BSD.sha_an.ord(p))) else: # BSD.sha_an.ord(p) <= ord_p_bound != 0: if p in BSD.bounds: low = BSD.bounds[p][0] BSD.bounds[p] = (low, min(BSD.bounds[p][1], ord_p_bound)) else: BSD.bounds[p] = (0, ord_p_bound) for p in primes_to_remove: kolyvagin_primes.remove(p) BSD.primes = list(set(BSD.primes).union(set(kolyvagin_primes)))
# Kato's bound if BSD.rank == 0 and not BSD.curve.has_cm(): L_over_Omega = BSD.curve.lseries().L_ratio() kato_primes = BSD.Sha.bound_kato() primes_to_remove = [] for p in BSD.primes: if p == 2: continue if p not in kato_primes: if verbosity > 0: print('Kato further implies that #Sha[%d] is trivial.' % p) primes_to_remove.append(p) if p in BSD.proof: BSD.proof[p].append(('Kato', 0)) else: BSD.proof[p] = [('Kato', 0)] if p not in [2, 3] and BSD.N % p != 0: if galrep.is_surjective(p): bd = L_over_Omega.valuation(p) if verbosity > 1: print('Kato implies that ord_p(#Sha[%d]) <= %d ' % (p, bd)) if p in BSD.proof: BSD.proof[p].append(('Kato', bd)) else: BSD.proof[p] = [('Kato', bd)] if p in BSD.bounds: low = BSD.bounds[p][0] BSD.bounds[p] = (low, min(BSD.bounds[p][1], bd)) else: BSD.bounds[p] = (0, bd) for p in primes_to_remove: BSD.primes.remove(p)
# Mazur primes_to_remove = [] if BSD.N.is_prime(): for p in BSD.primes: if p == 2: continue if galrep.is_reducible(p): primes_to_remove.append(p) if verbosity > 0: print('True for p=%s by Mazur' % p) for p in primes_to_remove: BSD.primes.remove(p) if p in BSD.proof: BSD.proof[p].append('Mazur') else: BSD.proof[p] = ['Mazur'] BSD.primes.sort()
# Try harder to compute the Heegner index, where it matters if heegner_index is None: if max_height < 18: max_height = 18 for D in BSD.heegner_index_upper_bound: M = BSD.heegner_index_upper_bound[D] for p in kolyvagin_primes: if p not in BSD.primes or p == 3: continue if verbosity > 0: print(' p = %d: Trying harder for Heegner index' % p) obt = 0 while p**(BSD.sha_an.ord(p) / 2 + 1) <= M and max_height < 22: if verbosity > 2: print(' trying max_height =', max_height) old_bound = M M, _, exact = BSD.curve.heegner_index_bound( D, max_height=max_height, secs_dc=secs_hi) if M == -1: max_height += 1 continue if exact is not False: heegner_index = exact BSD.heegner_indexes[D] = exact M = exact if verbosity > 2: print(' heegner index =', M) else: M = max(M + [1]) if verbosity > 2: print(' bound =', M) if old_bound == M: obt += 1 if obt == 2: break max_height += 1 BSD.heegner_index_upper_bound[D] = min( M, BSD.heegner_index_upper_bound[D]) low, upp = BSD.bounds[p] expn = 0 while p**(expn + 1) <= M: expn += 1 if 2 * expn < upp: upp = 2 * expn BSD.bounds[p] = (low, upp) if verbosity > 0: print(' got better bound on ord_p =', upp) if low == upp: if upp != BSD.sha_an.ord(p): raise RuntimeError else: if verbosity > 0: print(' proven!')
BSD.primes.remove(p) break for p in kolyvagin_primes: if p not in BSD.primes or p == 3: continue for D in BSD.curve.heegner_discriminants_list(4): if D in BSD.heegner_index_upper_bound: continue print(' discriminant', D) if verbosity > 0: print('p = %d: Trying discriminant = %d for Heegner index' % ( p, D)) max_height = max( 10, BSD.curve.quadratic_twist(D).CPS_height_bound()) obt = 0 while True: if verbosity > 2: print(' trying max_height =', max_height) old_bound = M if p**(BSD.sha_an.ord(p) / 2 + 1) > M or max_height >= 22: break M, _, exact = BSD.curve.heegner_index_bound( D, max_height=max_height, secs_dc=secs_hi) if M == -1: max_height += 1 continue if exact is not False: heegner_index = exact BSD.heegner_indexes[D] = exact M = exact if verbosity > 2: print(' heegner index =', M) else: M = max(M + [1]) if verbosity > 2: print(' bound =', M) if old_bound == M: obt += 1 if obt == 2: break max_height += 1 BSD.heegner_index_upper_bound[D] = M low, upp = BSD.bounds[p] expn = 0 while p**(expn + 1) <= M: expn += 1 if 2 * expn < upp: upp = 2 * expn BSD.bounds[p] = (low, upp) if verbosity > 0: print(' got better bound =', upp) if low == upp: if upp != BSD.sha_an.ord(p): raise RuntimeError else: if verbosity > 0: print(' proven!') BSD.primes.remove(p) break
# print some extra information if verbosity > 1: if len(BSD.primes) > 0: print('Remaining primes:') for p in BSD.primes: s = 'p = ' + str(p) + ': ' if galrep.is_irreducible(p): s += 'ir' s += 'reducible, ' if not galrep.is_surjective(p): s += 'not ' s += 'surjective, ' a_p = BSD.curve.an(p) if BSD.curve.is_good(p): if a_p % p != 0: s += 'good ordinary' else: s += 'good, non-ordinary' else: assert BSD.curve.is_minimal() if a_p == 0: s += 'additive' elif a_p == 1: s += 'split multiplicative' elif a_p == -1: s += 'non-split multiplicative' if BSD.curve.tamagawa_product() % p == 0: s += ', divides a Tamagawa number' if p in BSD.bounds: s += '\n (%d <= ord_p <= %d)' % BSD.bounds[p] else: s += '\n (no bounds found)' s += '\n ord_p(#Sha_an) = %d' % BSD.sha_an.ord(p) if heegner_index is None: may_divide = True for D in BSD.heegner_index_upper_bound: if p > BSD.heegner_index_upper_bound[ D] or p not in kolyvagin_primes: may_divide = False if may_divide: s += '\n may divide the Heegner index, for which only a bound was computed' print(s) if BSD.curve.has_cm(): if BSD.rank == 1: BSD.proof['reason_finite'] = 'Rubin&Kolyvagin' else: BSD.proof['reason_finite'] = 'Rubin' else: BSD.proof['reason_finite'] = 'Kolyvagin'
# reduce memory footprint of BSD object: BSD.curve = BSD.curve.label() BSD.Sha = None return BSD if return_BSD else BSD.primes
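## Hedged usage sketch (not part of the original source), mirroring the ``return_BSD``
## EXAMPLE in the docstring above: the returned BSD_data object records which primes are
## still open and how the remaining ones were handled.  The helper name is hypothetical;
## the curve label is the one used in the docstring.
def _prove_BSD_usage_sketch():
    from sage.all import EllipticCurve
    E = EllipticCurve('26b')
    B = E.prove_BSD(return_BSD=True)
    # B.primes lists the primes p for which BSD(E, p) is still unproven;
    # B.proof, B.bounds and B.heegner_indexes record how the other primes were settled.
    return B.primes, B.heegner_indexes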
def series(self, n=2, quadratic_twist=+1, prec=5): r""" Returns the `n`-th approximation to the `p`-adic L-series as a power series in `T` (corresponding to `\gamma-1` with `\gamma=1+p` as a generator of `1+p\ZZ_p`). Each coefficient is a `p`-adic number whose precision is provably correct. Here the normalization of the `p`-adic L-series is chosen such that `L_p(J,1) = (1-1/\alpha)^2 L(J,1)/\Omega_J` where `\alpha` is the unit root. INPUT: - ``n`` - (default: 2) a positive integer - ``quadratic_twist`` - (default: +1) a fundamental discriminant of a quadratic field, coprime to the conductor of the curve - ``prec`` - (default: 5) maximal number of terms of the series to compute; to compute as many as possible just give a very large number for ``prec``; the result will still be correct. ALIAS: power_series is identical to series. EXAMPLES:: sage: J = J0(188)[0] sage: p = 7 sage: L = J.padic_lseries(p) sage: L.is_ordinary() True sage: f = L.series(2) sage: f[0] O(7^20) sage: f[1].norm() 3 + 4*7 + 3*7^2 + 6*7^3 + 5*7^4 + 5*7^5 + 6*7^6 + 4*7^7 + 5*7^8 + 7^10 + 5*7^11 + 4*7^13 + 4*7^14 + 5*7^15 + 2*7^16 + 5*7^17 + 7^18 + 7^19 + O(7^20) """ n = ZZ(n) if n < 1: raise ValueError("n (=%s) must be a positive integer" % n) if not self.is_ordinary(): raise ValueError("p (=%s) must be an ordinary prime" % self._p) # check if the conditions on quadratic_twist are satisfied D = ZZ(quadratic_twist) if D != 1: if D % 4 == 0: d = D//4 if not d.is_squarefree() or d % 4 == 1: raise ValueError("quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field" % D) else: if not D.is_squarefree() or D % 4 != 1: raise ValueError("quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field" % D) if gcd(D,self._p) != 1: raise ValueError("quadratic twist (=%s) must be coprime to p (=%s)" % (D,self._p)) if gcd(D,self._E.conductor()) != 1: for ell in prime_divisors(D): if valuation(self._E.conductor(),ell) > valuation(D,ell): raise ValueError("cannot twist a curve of conductor (=%s) by the quadratic twist (=%s)." % (self._E.conductor(),D)) p = self._p if p == 2 and self._normalize: print('Warning: for p = 2 the normalization might not be correct!')
#verbose("computing L-series for p=%s, n=%s, and prec=%s"%(p,n,prec)) # bounds = self._prec_bounds(n,prec) # padic_prec = max(bounds[1:]) + 5 padic_prec = 10 # verbose("using p-adic precision of %s"%padic_prec) res_series_prec = min(p**(n-1), prec) verbose("using series precision of %s"%res_series_prec) ans = self._get_series_from_cache(n, res_series_prec,D) if not ans is None: verbose("found series in cache") return ans K = QQ gamma = K(1 + p) R = PowerSeriesRing(K,'T',res_series_prec) T = R(R.gen(),res_series_prec ) #L = R(0) one_plus_T_factor = R(1) gamma_power = K(1) teich = self.teichmuller(padic_prec) p_power = p**(n-1) # F = Qp(p,padic_prec) verbose("Now iterating over %s summands"%((p-1)*p_power)) verbose_level = get_verbose() count_verb = 0 alphas = self.alpha() #print len(alphas) Lprod = [] self._emb = 0 if len(alphas) == 2: split = True else: split = False for alpha in alphas: L = R(0) self._emb = self._emb + 1 for j in range(p_power): s = K(0) if verbose_level >= 2 and j/p_power*100 > count_verb + 3: verbose("%.2f percent done"%(float(j)/p_power*100)) count_verb += 3 for a in range(1,p): if split: # b = ((F.teichmuller(a)).lift() % ZZ(p**n)) b = (teich[a]) % ZZ(p**n) b = b*gamma_power else: b = teich[a] * gamma_power s += self.measure(b, n, padic_prec,D,alpha) L += s * one_plus_T_factor one_plus_T_factor *= 1+T gamma_power *= gamma Lprod = Lprod + [L] if len(Lprod)==1: return Lprod[0] else: return Lprod[0]*Lprod[1]