Example #1
def slope_factors(f, vK, precision, reduce_function, slope_bound=0):
    r"""
    Return the slope factorization of a polynomial over a discretely valued field.

    INPUT:

    - ``f`` -- a monic polynomial over a field `K`
    - ``vK`` -- a discrete valuation on `K` for which `f` is integral
    - ``precision`` -- a positive integer
    - ``reduce_function`` -- a function which takes as input an element of `K`
      and returns a simpler approximation; the approximation is with relative
      precision ``precision``
    - ``slope_bound`` -- a rational number (default: `0`)

    OUTPUT: a dictionary ``F`` whose keys are the pairwise distinct slopes `s_i`
    of the Newton polygon of `f`; the value ``F[s_i]`` is a monic and integral
    polynomial `f_i` whose Newton polygon has `s_i` as its only slope, and

    .. MATH::

            v_K( f - \prod_i f_i ) > N,

    where `N` is ``precision`` plus the valuation of the first nonzero
    coefficient of `f`.

    If ``slope_bound`` is given, then only the factors with slope strictly less
    than ``slope_bound`` are computed.

    EXAMPLES::

        sage: from mclf.padic_extensions.slope_factors import slope_factors
        sage: from mclf.padic_extensions.fake_padic_completions import FakepAdicCompletion
        sage: from sage.all import GaussValuation
        sage: R.<x> = QQ[]
        sage: v2 = QQ.valuation(2)
        sage: Q2 = FakepAdicCompletion(QQ, v2)
        sage: f = (x - 2)*(x + 4) + 2^8
        sage: reduce_function = lambda g: Q2.reduce_polynomial(g, 5)
        sage: F = slope_factors(f, v2, 3, reduce_function)
        sage: F
        {-2: x + 4, -1: x + 30}
        sage: v = GaussValuation(R, v2)
        sage: v(f - F[-2]*F[-1])
        5


    """
    from sage.geometry.newton_polygon import NewtonPolygon
    # normalize vK so that its uniformizer has valuation 1
    vK = vK.scale(1 / vK(vK.uniformizer()))
    assert not f.is_constant(), "f must be nonconstant"
    assert f.is_monic(), "f must be monic"
    NP = NewtonPolygon([(i, vK(f[i])) for i in range(f.degree() + 1)])
    slopes = NP.slopes(False)
    assert all(s <= 0 for s in slopes), "f must be integral"
    F = {}
    for i in range(len(slopes)):
        s = slopes[i]
        if s < slope_bound:
            g = factor_with_slope(f, vK, s, precision, reduce_function)
            F[s] = g
    return F
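The keys of the returned dictionary are exactly the slopes of the Newton polygon of ``f``. Below is a minimal standalone sketch (an illustration only, not part of the module; it assumes nothing beyond ``sage.all`` and ``NewtonPolygon``) that recomputes the slopes for the polynomial used in the doctest and matches them against the keys ``{-2, -1}``:

# Minimal standalone sketch: the Newton polygon of the doctest polynomial
# f = (x - 2)*(x + 4) + 2^8 has the slopes -2 and -1, matching the keys of
# the dictionary returned by slope_factors.  (Illustration only.)
from sage.all import QQ, PolynomialRing
from sage.geometry.newton_polygon import NewtonPolygon

R = PolynomialRing(QQ, 'x')
x = R.gen()
v2 = QQ.valuation(2)
f = (x - 2)*(x + 4) + 2**8            # = x^2 + 2*x + 248
NP = NewtonPolygon([(i, v2(f[i])) for i in range(f.degree() + 1)])
print(NP.slopes(False))               # [-2, -1]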
Example #2
    def upper_components(self):
        r"""
        Return the list of all upper components lying above this lower component.

        This lower component corresponds to a discrete valuation `v` on a rational
        function field `L(x)` extending the valuation `v_L`, where `L/K` is some
        finite extension of the base field `K`. The upper components correspond
        to the extensions of v to the function field of `Y_L` (which is a finite
        extension of `L(x)`).

        Since the computation of all extensions of a nonstandard valuation on a
        function field to a finite extension is not yet available in Sage, we
        have to invoke the MacLane algorithm ourselves.

        EXAMPLES:

        This example shows that extending valuations also works if the equation
        is not integral with respect to the valuation `v`::

            sage: from mclf import *
            sage: R.<x> = QQ[]
            sage: Y = SuperellipticCurve(5*x^3 + 1, 2)
            sage: Y2 = SemistableModel(Y, QQ.valuation(5))
            sage: Y2.is_semistable()  # indirect doctest
            True

        """
        from sage.all import QQ
        from sage.geometry.newton_polygon import NewtonPolygon
        from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
        v = self.valuation()
        FY = self.reduction_tree().curve().function_field()
        FYL = base_change_of_function_field(FY, self.base_field())
        FXL = FYL.rational_function_field()
        assert v.domain() == FXL
        G = FYL.polynomial()
        # now FYL = FXL(y| G(y) = 0)

        # we first have to make G integral with respect to v
        np = NewtonPolygon([(i, v(G[i])) for i in range(G.degree() + 1)])
        r = np.slopes()[-1]   # the largest slope
        if r <= 0:      # G is integral w.r.t. v
            upper_valuations = [FYL.valuation(w)
                for w in v.mac_lane_approximants(FYL.polynomial(), require_incomparability=True)]
        else:           # G is not integral
            vK = self.reduction_tree().base_valuation()
            pi = vK.uniformizer()           # we construct a function field FYL1
            k = QQ(r/v(pi)).ceil()          # isomorphic to FYL, but with
            R1 = PolynomialRing(FXL, 'y1')  # integral equation G_1
            y1 = R1.gen()
            G1 = G(pi**(-k)*y1).monic()
            assert all([v(c) >= 0 for c in G1.coefficients()]), "new G is not integral!"
            FYL1 = FXL.extension(G1, 'y1')
            y1 = FYL1.gen()
            V = v.mac_lane_approximants(G1, require_incomparability=True)
            V = [FYL1.valuation(w) for w in V]   # the extensions of v to FYL1
            upper_valuations = [FYL.valuation((w,
                    FYL.hom(y1/pi**k), FYL1.hom(pi**k*FYL.gen()))) for w in V]
                                                 # made into valuations on FYL
        return [UpperComponent(self, w) for w in upper_valuations]
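Below is a minimal standalone sketch (toy data over QQ with the 5-adic valuation, not the actual function field setting of the method) of the integrality trick used in the ``else`` branch above: substituting ``y = pi^(-k)*y1`` and passing to the associated monic polynomial yields an equation that is integral with respect to the valuation.

# Minimal sketch (hypothetical toy data): make a polynomial integral w.r.t. a
# valuation via the substitution y = pi^(-k)*y1, as in the else branch above.
from sage.all import QQ, PolynomialRing
from sage.geometry.newton_polygon import NewtonPolygon

v = QQ.valuation(5)
pi = QQ(5)
R1 = PolynomialRing(QQ, 'y1')
y1 = R1.gen()
G = y1**2 - 1/pi                      # not integral: v(G[0]) = -1
np = NewtonPolygon([(i, v(G[i])) for i in range(G.degree() + 1)])
r = np.slopes()[-1]                   # largest slope, here 1/2 > 0
k = QQ(r / v(pi)).ceil()              # k = 1
G1 = G(pi**(-k) * y1).monic()         # y1^2 - 5, now integral w.r.t. v
assert all(v(c) >= 0 for c in G1.coefficients())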
Example #3
    def simplify_irreducible_polynomial(self, f):
        r"""
        Return a simpler polynomial generating the same extension.

        INPUT:

        - ``f`` -- a univariate polynomial over the underlying number field `K`
          which is integral and irreducible over `\hat{K}`

        OUTPUT:

        A polynomial `g` over `K` which is irreducible over `\hat{K}`,
        and which generates the same extension of `\hat{K}` as `f`.

        """
        from sage.geometry.newton_polygon import NewtonPolygon
        R = f.parent()
        x = R.gen()
        assert R.base_ring() == self.number_field(), "f must be defined over K"
        # first we see if we can normalize f such that the unique slope of the
        # Newton polygon is >-1 and <=0.
        vK = self.valuation()
        NP = NewtonPolygon([(i, vK(f[i])) for i in range(f.degree() + 1)])
        slopes = NP.slopes(repetition=False)
        assert len(slopes) == 1, "f is not irreducible over the completion!"
        s = slopes[0]
        assert s <= 0, "f is not integral"
        if s <= -1:
            m = (-s).floor()
            pi = self.uniformizer()
            f = f(pi**m * x).monic()
        # Now we simplify the coefficients of f
        N = vK(f[0]).ceil() + 1
        n = f.degree()
        while True:
            g = R([self.reduce(f[i], N) for i in range(n + 1)])
            if g.is_squarefree() and self.is_approximate_irreducible_factor(
                    g, f):
                return g
            else:
                N = N + 1
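Below is a minimal standalone sketch (hypothetical data over QQ with the 2-adic valuation) of the normalization step performed above: the substitution ``x -> pi^m*x`` followed by ``monic()`` moves a unique Newton slope ``s <= -1`` into the interval ``(-1, 0]``.

# Minimal sketch (hypothetical data): the substitution x -> pi^m * x followed
# by monic() moves the unique Newton slope s <= -1 into the interval (-1, 0].
from sage.all import QQ, PolynomialRing
from sage.geometry.newton_polygon import NewtonPolygon

vK = QQ.valuation(2)
pi = QQ(2)
R = PolynomialRing(QQ, 'x')
x = R.gen()
f = x**2 + 2**5                       # unique slope s = -5/2
NP = NewtonPolygon([(i, vK(f[i])) for i in range(f.degree() + 1)])
s = NP.slopes(repetition=False)[0]
m = (-s).floor()                      # m = 2
g = f(pi**m * x).monic()              # x^2 + 2, unique slope -1/2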
Example #4
def _good_approximation(w, v):
    r""" Return a good approximation of this extension of a discrete valuation.

    INPUT:

    - ``w`` -- a discrete valuation on a field `L`, which is a finite extension
               of a field `K`
    - ``v`` -- the restriction of `w` to `K`

    OUTPUT: a discrete valuation on the polynomial ring over `K`, which
            is a 'good' approximation of `w`.

    Let `\alpha` denote the generator of the extension `L/K`, and `f\in K[x]`
    the minimal polynomial of `\alpha`. Also, let `v` denote the restriction
    of `w` to `K`. The finitely many extensions of `v` to `L` correspond to the
    irreducible factors of `f` over the completion of `K` with respect to `v`.
    Write `g` for the factor corresponding to the given extension `w`.

    An *approximation* of `w` is a discrete valuation `\tilde{w}` on `K[x]` with
    the following properties:

    - the restriction of `\tilde{w}` to `K` is equal to `v`
    - `\tilde{w}(x)\geq 0`
    - `\tilde{w}(h) \leq w(h)`, for all `h\in K[x]`

    An approximation `\tilde{w}` is called *selective* if it is not an
    approximation for any extension of `v` to `L` other than `w`.

    Let `\tilde{w}` be a selective approximation of `w`. It is of the form

    .. MATH::

        \tilde{w} = [w_0, w_1(\phi_1)=\lambda_1,\ldots,w_n(\phi_n)=\lambda_n].

    Write `\phi:=\phi_n`. The approximation `\tilde{w}` is called *good* if

    - `\phi` has the same degree as the factor `g` of `f`, and
    - `\phi` has a root `\beta` (in the algebraic closure of the completion of
      `K`) which is closer to `\alpha` than any root of `f` different from `\alpha`

    It then follows from Krasner's Lemma that `\phi` generates the same extension
    of the completion of `K` as `g`, i.e.

    .. MATH::

        \hat{K}[\alpha]\cong\hat{K}[\beta].

    """
    from sage.geometry.newton_polygon import NewtonPolygon

    L = w.domain()
    alpha = L.gen()
    f = alpha.minpoly()
    x = f.parent().gen()
    F = f(alpha + x).shift(-1)
    np = NewtonPolygon([(i, w(F[i])) for i in range(F.degree() + 1)])
    mu = -np.slopes()[0]

    wt = _some_approximation(w, v)
    f = wt.domain()(f)
    assert hasattr(wt, "phi"), "wt = {}, L = {}".format(wt, L)
    while w(wt.phi()(alpha)) <= mu * wt.phi().degree():
        wt = wt.mac_lane_step(f)[0]
        """
        try:
            wt = wt.mac_lane_step(f)[0]
        except:
            print("wt = ", wt)
            print("domain = ", wt.domain())
            raise ValueError()
        """
    return wt
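Below is a minimal standalone sketch (hypothetical data) of the inductive form ``[w_0, w_1(phi_1)=lambda_1, ...]`` mentioned above: in Sage such approximations are built from a Gauss valuation by augmentation, and ``phi()`` returns the last key polynomial, as used in the loop above.

# Minimal sketch (hypothetical data): a selective approximation has the form
# [w_0, w_1(phi_1) = lambda_1, ...]; in Sage it is built from a Gauss
# valuation by augmentation, and phi() returns the last key polynomial.
from sage.all import QQ, PolynomialRing, GaussValuation

R = PolynomialRing(QQ, 'x')
x = R.gen()
v2 = QQ.valuation(2)
w0 = GaussValuation(R, v2)            # w_0
wt = w0.augmentation(x, QQ(1)/2)      # w_1(phi_1) = lambda_1 with phi_1 = x, lambda_1 = 1/2
assert wt(x) == QQ(1)/2 and wt(R(2)) == 1
print(wt.phi())                       # x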
Example #5
    def is_approximate_irreducible_factor(self, g, f, v=None):
        r"""
        Check whether ``g`` is an approximate irreducible factor of ``f``.

        INPUT:

        - ``g``: univariate polynomial over the underlying number field `K_0`
        - ``f``: univariate polynomial over `K_0`
        - ``v``: a MacLane valuation on `K_0[x]` approximating ``g``, or ``None``;
          here *approximating* means that ``LimitValuation(v, g)`` is well-defined.

        OUTPUT: ``True`` if ``g`` is an approximate irreducible factor of ``f``,
        i.e. if ``g`` is irreducible over `K` and Krasner's condition is
        satisfied. If so, the stem field of ``g`` over `K` is a subfield of the
        splitting field of ``f`` over `K`.

        Here we say that *Krasner's Condition* holds if for some root `\alpha`
        of `g` there exists a root `\beta` of `f` such that `\alpha` is `p`-adically
        closer to `\beta` than to any other root of `g`.

        Note that if `\deg(g)=1` then the condition is nontrivial, even though
        the conclusion from Krasner's Lemma is trivial.

        """
        from sage.geometry.newton_polygon import NewtonPolygon
        from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
        from sage.rings.valuation.limit_valuation import LimitValuation
        K = self.number_field()
        vK = self.valuation()
        assert K.has_coerce_map_from(f.parent().base_ring())
        f = f.change_ring(K)
        R = f.parent()
        assert K.has_coerce_map_from(g.parent().base_ring())
        g = R(g)
        assert g.is_monic(), 'g has to be monic'
        f = f.monic()
        if g == f:
            return True
        x = R.gen()

        if g.degree() == 1:             # the case deg(g)=1 is different
            alpha = -g[0]               # the unique root of g
            F = f(x+alpha)
            if F[0] == 0:               # alpha is an exact root of f
                return True
            np_f = NewtonPolygon([(i, vK(F[i])) for i in range(F.degree()+1)])
            # the slopes correspond to vK(alpha-beta), beta the roots of f
            return ( len(np_f.vertices())>1 and np_f.vertices()[1][0]==1 and
                    np_f.slopes()[0]<0 )

        # now deg(g)>1
        if v is None:
            V = vK.mac_lane_approximants(g)
            if len(V) != 1:
                return False   # g is not irreducible
            v = V[0]
        v = LimitValuation(v, g)
        # instead of checking whether v(f) == Infinity, we test divisibility
        # by the approximated polynomial v._G directly:
        if (v._G).divides(f):
            return True
        # the valuation v has the property
        #        v(h)=vK(h(alpha))
        # for all h in K[x], where alpha is a root of g

        S = PolynomialRing(R, 'T')
        G = g(x + S.gen()).shift(-1)
        np_g = NewtonPolygon([(i, v(G[i])) for i in range(G.degree()+1)])
        # the slopes of np_g correspond to the valuations of
        # alpha-alpha_i, where alpha_i runs over all roots of
        # g distinct from alpha

        F = f(x + S.gen())
        np_f = NewtonPolygon([(i, v(F[i])) for i in range(F.degree()+1)])
        # the slopes of np_f correspond to the valuations of
        # alpha-beta, where beta runs over all roots of f

        result = min(np_g.slopes()) > min(np_f.slopes())
        # this is true if there is a root beta of f
        # such that vK(alpha-beta)>vK(alpha-alpha_i) for all i
        return result
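Below is a minimal standalone sketch (hypothetical data) of the ``deg(g) = 1`` criterion used above: with ``g = x`` (so ``alpha = 0``) the slopes of the Newton polygon of ``F = f(x + alpha)`` are the negatives of the valuations ``vK(alpha - beta)``, where ``beta`` runs over the roots of ``f``; the test asks for a first segment of length one (a unique closest root) with negative slope.

# Minimal sketch (hypothetical data) of the deg(g) = 1 criterion: with g = x
# (alpha = 0) and f = x^2 + 4*x + 32, the roots of f have valuations 3 and 2,
# so alpha has a unique closest root and the criterion holds.
from sage.all import QQ, PolynomialRing
from sage.geometry.newton_polygon import NewtonPolygon

vK = QQ.valuation(2)
R = PolynomialRing(QQ, 'x')
x = R.gen()
f = x*(x + 4) + 32                    # = x^2 + 4*x + 32
alpha = QQ(0)                         # the unique root of g = x
F = f(x + alpha)
np_f = NewtonPolygon([(i, vK(F[i])) for i in range(F.degree() + 1)])
print(np_f.vertices())                # the vertices are (0, 5), (1, 2), (2, 0)
ok = (len(np_f.vertices()) > 1 and np_f.vertices()[1][0] == 1
      and np_f.slopes()[0] < 0)
print(ok)                             # True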