Example #1
    def cardinality(self):
        r"""
        Return the number of words in the shuffle product
        of ``w1`` and ``w2``.

        This is understood as a multiset cardinality, not as a
        set cardinality: words are counted with multiplicity, not
        only once per distinct word.

        It is given by `\binom{l_1+l_2}{l_1}`, where `l_1` is the
        length of ``w1`` and `l_2` is the length of ``w2``.

        EXAMPLES::

            sage: from sage.combinat.words.shuffle_product import ShuffleProduct_w1w2
            sage: w, u = map(Words("abcd"), ["ab", "cd"])
            sage: S = ShuffleProduct_w1w2(w,u)
            sage: S.cardinality()
            6

            sage: w, u = map(Words("ab"), ["ab", "ab"])
            sage: S = ShuffleProduct_w1w2(w,u)
            sage: S.cardinality()
            6
        """
        return binomial(self._w1.length()+self._w2.length(), self._w1.length())
Example #2
    def cardinality(self):
        r"""
        Return the number of words in the shuffle product
        of ``w1`` and ``w2``.

        This is understood as a multiset cardinality, not as a
        set cardinality: words are counted with multiplicity, not
        only once per distinct word.

        It is given by `\binom{l_1+l_2}{l_1}`, where `l_1` is the
        length of ``w1`` and `l_2` is the length of ``w2``.

        EXAMPLES::

            sage: from sage.combinat.words.shuffle_product import ShuffleProduct_w1w2
            sage: w, u = map(Words("abcd"), ["ab", "cd"])
            sage: S = ShuffleProduct_w1w2(w,u)
            sage: S.cardinality()
            6

            sage: w, u = map(Words("ab"), ["ab", "ab"])
            sage: S = ShuffleProduct_w1w2(w,u)
            sage: S.cardinality()
            6
        """
        return binomial(self._w1.length() + self._w2.length(),
                        self._w1.length())
Example #3
    def unrank(self, r):
        """
        EXAMPLES::

            sage: c = Combinations([1,2,3])
            sage: c.list() == map(c.unrank, range(c.cardinality()))
            True
        """
        k = 0
        n = len(self.mset)
        b = binomial(n, k)
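        # Ranks are grouped by subset size: the first binomial(n, 0) ranks index the
        # size-0 combination, the next binomial(n, 1) the size-1 ones, and so on,
        # so this loop finds the size k whose block contains r.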
        while r >= b:
            r -= b
            k += 1
            b = binomial(n,k)

        return map(lambda i: self.mset[i], from_rank(r, n, k))
Example #4
    def unrank(self, r):
        """
        EXAMPLES::
        
            sage: c = Combinations([1,2,3])
            sage: c.list() == map(c.unrank, range(c.cardinality()))
            True
        """
        k = 0
        n = len(self.mset)
        b = binomial(n, k)
        while r >= b:
            r -= b
            k += 1
            b = binomial(n, k)

        return map(lambda i: self.mset[i], from_rank(r, n, k))
Example #5
    def by_taylor_expansion(self, fs, k):
        r"""
        We combine the theta decomposition and the heat operator as in [Sko].
        This yields a bijection between Jacobi forms of weight `k` and
        `M_k \times S_{k+2} \times \cdots \times S_{k+2m}`.
        
        NOTE:

            To make ``phi_divs`` integral we introduce an extra factor
            `2^{\mathrm{index}} * \mathrm{factorial}(k + 2 * \mathrm{index} - 1)`.
        """
        ## we introduce some abbreviations
        p = self.__p
        PS = self.power_series_ring()

        if not len(fs) == self.__precision.jacobi_index() + 1:
            raise ValueError(
                "fs must be a list of m + 1 elliptic modular forms or their Fourier expansions"
            )

        qexp_prec = self._qexp_precision()
        if qexp_prec is None:  # there are no forms below the precision
            return dict()

        f_divs = dict()
        for (i, f) in enumerate(fs):
            f_divs[(i, 0)] = PS(f(qexp_prec), qexp_prec)

        for i in xrange(self.__precision.jacobi_index() + 1):
            for j in xrange(1, self.__precision.jacobi_index() - i + 1):
                f_divs[(i, j)] = f_divs[(i, j - 1)].derivative().shift(1)
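                # i.e. f_divs[(i, j)] = (q d/dq)^j f_i: the power series derivative
                # followed by shift(1) multiplies back by q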

        phi_divs = list()
        for i in xrange(self.__precision.jacobi_index() + 1):
            ## This is the formula in Skoruppa's thesis. He uses d/dtau instead of
            ## d/dz, which yields a factor 4m.
            phi_divs.append(
                sum(f_divs[(j, i - j)] *
                    (4 * self.__precision.jacobi_index())**i * binomial(i, j) *
                    (2**self.index() // 2**i) * prod(2 * (i - l) + 1
                                                     for l in xrange(1, i)) *
                    (factorial(k + 2 * self.index() - 1) //
                     factorial(i + k + j - 1)) *
                    factorial(2 * self.__precision.jacobi_index() + k - 1)
                    for j in xrange(i + 1)))

        phi_coeffs = dict()
        for r in xrange(self.index() + 1):
            series = sum(map(operator.mul, self._theta_factors()[r], phi_divs))
            series = self._eta_factor() * series

            for n in xrange(qexp_prec):
                phi_coeffs[(n, r)] = int(series[n].lift()) % p

        return phi_coeffs
Example #6
 def cardinality(self):
     """
     TESTS::
     
         sage: SC4 = SignedCompositions(4)
         sage: SC4.cardinality() == len(SC4.list())
         True
         sage: SignedCompositions(3).cardinality()
         18
     """
     return sum([ binomial(self.n-1, i-1)*2**(i) for i in range(1, self.n+1)])
Example #7
 def cardinality(self):
     """
     TESTS::
     
         sage: SC4 = SignedCompositions(4)
         sage: SC4.cardinality() == len(SC4.list())
         True
         sage: SignedCompositions(3).cardinality()
         18
     """
     return sum([
         binomial(self.n - 1, i - 1) * 2**(i) for i in range(1, self.n + 1)
     ])
Example #8
    def by_taylor_expansion(self, fs, k) :
        r"""
        We combine the theta decomposition and the heat operator as in [Sko].
        This yields a bijection between Jacobi forms of weight `k` and
        `M_k \times S_{k+2} \times \cdots \times S_{k+2m}`.
        
        NOTE:

            To make ``phi_divs`` integral we introduce an extra factor
            `2^{\mathrm{index}} * \mathrm{factorial}(k + 2 * \mathrm{index} - 1)`.
        """
        ## we introduce some abbreviations
        p = self.__p
        PS = self.power_series_ring()
            
        if not len(fs) == self.__precision.jacobi_index() + 1 :
            raise ValueError("fs must be a list of m + 1 elliptic modular forms or their Fourier expansions")
        
        qexp_prec = self._qexp_precision()
        if qexp_prec is None : # there are no forms below the precision
            return dict()
        
        f_divs = dict()
        for (i, f) in enumerate(fs) :
            f_divs[(i, 0)] = PS(f(qexp_prec), qexp_prec)
                
        for i in xrange(self.__precision.jacobi_index() + 1) :
            for j in xrange(1, self.__precision.jacobi_index() - i + 1) :
                f_divs[(i,j)] = f_divs[(i, j - 1)].derivative().shift(1)
            
        phi_divs = list()
        for i in xrange(self.__precision.jacobi_index() + 1) :
            ## This is the formula in Skoruppa's thesis. He uses d/dtau instead of
            ## d/dz, which yields a factor 4m.
            phi_divs.append( sum( f_divs[(j, i - j)] * (4 * self.__precision.jacobi_index())**i
                                  * binomial(i,j) * ( 2**self.index() // 2**i)
                                  * prod(2*(i - l) + 1 for l in xrange(1, i))
                                  * (factorial(k + 2*self.index() - 1) // factorial(i + k + j - 1))
                                  * factorial(2*self.__precision.jacobi_index() + k - 1)
                                  for j in xrange(i + 1) ) )
            
        phi_coeffs = dict()
        for r in xrange(self.index() + 1) :
            series = sum( map(operator.mul, self._theta_factors()[r], phi_divs) )
            series = self._eta_factor() * series

            for n in xrange(qexp_prec) :
                phi_coeffs[(n, r)] = int(series[n].lift()) % p

        return phi_coeffs
Example #9
    def rank(self, x):
        """
        EXAMPLES::

            sage: c = Combinations([1,2,3])
            sage: range(c.cardinality()) == map(c.rank, c)
            True
        """
        x = map(self.mset.index, x)
        r = 0
        n = len(self.mset)
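        # Combinations are ranked by size first: all binomial(n, i) combinations of
        # size i < len(x) precede x; rank(x, n) then locates x within its own block.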
        for i in range(len(x)):
            r += binomial(n, i)
        r += rank(x, n)
        return r
Example #10
 def rank(self, x):
     """
     EXAMPLES::
     
         sage: c = Combinations([1,2,3])
         sage: range(c.cardinality()) == map(c.rank, c)
         True
     """
     x = map(self.mset.index, x)
     r = 0
     n = len(self.mset)
     for i in range(len(x)):
         r += binomial(n, i)
     r += rank(x, n)
     return r
Example #11
    def cardinality(self):
        """
        Returns the number of words in the shuffle product
        of w1 and w2.

        It is given by binomial(len(w1)+len(w2), len(w1)).

        EXAMPLES::

            sage: from sage.combinat.words.shuffle_product import ShuffleProduct_w1w2
            sage: w, u = map(Words("abcd"), ["ab", "cd"])
            sage: S = ShuffleProduct_w1w2(w,u)
            sage: S.cardinality()
            6
        """
        return binomial(self._w1.length()+self._w2.length(), self._w1.length())
Example #12
    def cardinality(self):
        """
        Returns the number of words in the shuffle product
        of w1 and w2.

        It is given by binomial(len(w1)+len(w2), len(w1)).

        EXAMPLES::

            sage: from sage.combinat.words.shuffle_product import ShuffleProduct_w1w2
            sage: w, u = map(Words("abcd"), ["ab", "cd"])
            sage: S = ShuffleProduct_w1w2(w,u)
            sage: S.cardinality()
            6
        """
        return binomial(self._w1.length() + self._w2.length(),
                        self._w1.length())
Example #13
    def _frobenius_coefficient_bound(self):
        """
        Compute a bound on the number of p-adic digits needed to recover
        the Frobenius polynomial, i.e. return B so that knowledge of
        a_1, ..., a_g modulo p^B determines the Frobenius polynomial uniquely.

        TESTS::

            sage: R.<t> = PolynomialRing(GF(37))
            sage: HyperellipticCurve(t^3 + t + 1)._frobenius_coefficient_bound()
            1
            sage: HyperellipticCurve(t^5 + t + 1)._frobenius_coefficient_bound()
            2
            sage: HyperellipticCurve(t^7 + t + 1)._frobenius_coefficient_bound()
            3

            sage: R.<t> = PolynomialRing(GF(next_prime(10^9)))
            sage: HyperellipticCurve(t^3 + t + 1)._frobenius_coefficient_bound()
            1
            sage: HyperellipticCurve(t^5 + t + 1)._frobenius_coefficient_bound()
            2
            sage: HyperellipticCurve(t^7 + t + 1)._frobenius_coefficient_bound()
            2
            sage: HyperellipticCurve(t^9 + t + 1)._frobenius_coefficient_bound()
            3
            sage: HyperellipticCurve(t^11 + t + 1)._frobenius_coefficient_bound()
            3
            sage: HyperellipticCurve(t^13 + t + 1)._frobenius_coefficient_bound()
            4
        """
        assert self.base_ring().is_finite()
        p = self.base_ring().characteristic()
        q = self.base_ring().order()
        sqrtq = RR(q).sqrt()
        g = self.genus()

        # note: this bound is from Kedlaya's paper, but he tells me it's not
        # the best possible
        M = 2 * binomial(2 * g, g) * sqrtq**g
        B = ZZ(M.ceil()).exact_log(p)
        if p**B < M:
            B += 1
        return B
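
A hedged cross-check of the bound above, in plain Python outside of Sage: ``B`` is the
smallest integer with `p^B \geq 2 \binom{2g}{g} \sqrt{q}^g`, and comparing squares keeps
the computation in exact integer arithmetic.  The helper name ``frobenius_bound`` and the
use of ``math.comb`` (Python 3.8+) are illustrative assumptions, not Sage API.

    from math import comb

    def frobenius_bound(p, q, g):
        # smallest B with (p**B)**2 >= M**2, where M = 2 * binomial(2g, g) * sqrt(q)**g
        M2 = 4 * comb(2 * g, g) ** 2 * q ** g
        B = 0
        while (p ** B) ** 2 < M2:
            B += 1
        return B

    # e.g. frobenius_bound(37, 37, 3) == 3, matching the genus-3 doctest above
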
Example #14
    def _frobenius_coefficient_bound(self):
        """
        Compute a bound on the number of p-adic digits needed to recover
        the Frobenius polynomial, i.e. return B so that knowledge of
        a_1, ..., a_g modulo p^B determines the Frobenius polynomial uniquely.

        TESTS::

            sage: R.<t> = PolynomialRing(GF(37))
            sage: HyperellipticCurve(t^3 + t + 1)._frobenius_coefficient_bound()
            1
            sage: HyperellipticCurve(t^5 + t + 1)._frobenius_coefficient_bound()
            2
            sage: HyperellipticCurve(t^7 + t + 1)._frobenius_coefficient_bound()
            3

            sage: R.<t> = PolynomialRing(GF(next_prime(10^9)))
            sage: HyperellipticCurve(t^3 + t + 1)._frobenius_coefficient_bound()
            1
            sage: HyperellipticCurve(t^5 + t + 1)._frobenius_coefficient_bound()
            2
            sage: HyperellipticCurve(t^7 + t + 1)._frobenius_coefficient_bound()
            2
            sage: HyperellipticCurve(t^9 + t + 1)._frobenius_coefficient_bound()
            3
            sage: HyperellipticCurve(t^11 + t + 1)._frobenius_coefficient_bound()
            3
            sage: HyperellipticCurve(t^13 + t + 1)._frobenius_coefficient_bound()
            4
        """
        assert self.base_ring().is_finite()
        p = self.base_ring().characteristic()
        q = self.base_ring().order()
        sqrtq = RR(q).sqrt()
        g = self.genus()

        # note: this bound is from Kedlaya's paper, but he tells me it's not
        # the best possible
        M = 2 * binomial(2*g, g) * sqrtq**g
        B = ZZ(M.ceil()).exact_log(p)
        if p**B < M:
            B += 1
        return B
Example #15
    def cardinality(self):
        r"""
        Return the number of elements in ``self``.

        The number of signed compositions of `n` is equal to

        .. MATH::

            \sum_{i=1}^{n+1} \binom{n-1}{i-1} 2^i
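
        For example, for `n = 3` this gives `2 + 8 + 8 = 18`.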

        TESTS::

            sage: SC4 = SignedCompositions(4)
            sage: SC4.cardinality() == len(SC4.list())
            True
            sage: SignedCompositions(3).cardinality()
            18
        """
        return sum([binomial(self.n-1, i-1)*2**(i) for i in range(1, self.n+1)])
Example #16
    def cardinality(self):
        r"""
        Return the number of elements in ``self``.

        The number of signed compositions of `n` is equal to

        .. MATH::

            \sum_{i=1}^{n+1} \binom{n-1}{i-1} 2^i

        TESTS::

            sage: SC4 = SignedCompositions(4)
            sage: SC4.cardinality() == len(SC4.list())
            True
            sage: SignedCompositions(3).cardinality()
            18
        """
        return sum([
            binomial(self.n - 1, i - 1) * 2**(i) for i in range(1, self.n + 1)
        ])
Example #17
def saturation(A, proof=True, p=0, max_dets=5):
    """
    Compute a saturation matrix of A.
    
    INPUT:
        A     -- a matrix over ZZ
        proof -- bool (default: True)
        p     -- int (default: 0); if not 0
                 only guarantees that output is p-saturated
        max_dets -- int (default: 5) max number of dets of
                 submatrices to compute. 
        
    OUTPUT:
        matrix -- saturation of the matrix A.

    EXAMPLES:
        sage: from sage.matrix.matrix_integer_dense_saturation import saturation
        sage: A = matrix(ZZ, 2, 2, [3,2,3,4]); B = matrix(ZZ, 2,3,[1,2,3,4,5,6]); C = A*B
        sage: C
        [11 16 21]
        [19 26 33]
        sage: C.index_in_saturation()
        18
        sage: S = saturation(C); S
        [11 16 21]
        [-2 -3 -4]
        sage: S.index_in_saturation()
        1
        sage: saturation(C, proof=False)
        [11 16 21]
        [-2 -3 -4]
        sage: saturation(C, p=2)
        [11 16 21]
        [-2 -3 -4]
        sage: saturation(C, p=2, max_dets=1)
        [11 16 21]
        [-2 -3 -4]
    """
    # Find a submatrix of full rank and instead saturate that matrix.
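    # The saturation depends only on the QQ-span of the rows, so it suffices to
    # saturate any subset of the rows that already has full rank r.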
    r = A.rank()
    if A.is_square() and r == A.nrows():
        return identity_matrix(ZZ, r)
    if A.nrows() > r:
        P = []
        while len(P) < r:
            P = matrix_integer_dense_hnf.probable_pivot_rows(A)
        A = A.matrix_from_rows(P)

    # Factor out all common factors from all rows, just in case.
    A = copy(A)
    A._factor_out_common_factors_from_each_row()

    if A.nrows() <= 1:
        return A

    A, zero_cols = A._delete_zero_columns()

    if max_dets > 0:
        # Take the GCD of at most max_dets randomly chosen determinants.
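        # The index of the row span of A in its saturation equals the gcd of all
        # of its maximal minors, so once the running gcd d is coprime to p (or is
        # 1 when p == 0), A is already (p-)saturated and we can stop.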
        nr = A.nrows(); nc = A.ncols()
        d = 0
        trials = min(binomial(nc, nr), max_dets)
        already_tried = []
        while len(already_tried) < trials:
            v = random_sublist_of_size(nc, nr)
            tm = verbose('saturation -- checking det condition on submatrix')
            d = gcd(d, A.matrix_from_columns(v).determinant(proof=proof))
            verbose('saturation -- got det down to %s'%d, tm)
            if gcd(d, p) == 1:
                return A._insert_zero_columns(zero_cols)
            already_tried.append(v)

        if gcd(d, p) == 1:
            # already p-saturated
            return A._insert_zero_columns(zero_cols)

        # Factor and p-saturate at each p.
        # This is not a good algorithm, because all the HNF's in it are really slow!
        #
        #tm = verbose('factoring gcd %s of determinants'%d)
        #limit = 2**31-1
        #F = d.factor(limit = limit)
        #D = [p for p, e in F if p <= limit]
        #B = [n for n, e in F if n > limit]  # all big factors -- there will only be at most one
        #assert len(B) <= 1
        #C = B[0]
        #for p in D:
        #    A = p_saturation(A, p=p, proof=proof)

    # This is a really simple but powerful algorithm.
    # FACT: If A is a matrix of full rank, then hnf(transpose(A))^(-1)*A is a saturation of A.
    # To make this practical we use solve_system_with_difficult_last_row, since the
    # last column of an HNF is typically the only really big one.
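    # A hedged illustration of the FACT, using the matrix C from the EXAMPLES block
    # above (an aside only; nothing below executes it):
    #     sage: C = matrix(ZZ, 2, 3, [11, 16, 21, 19, 26, 33])
    #     sage: Bt = C.transpose().hermite_form(include_zero_rows=False).transpose()
    #     sage: (Bt^-1 * C).change_ring(ZZ)
    #     [11 16 21]
    #     [-2 -3 -4]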
    B = A.transpose().hermite_form(include_zero_rows=False, proof=proof)
    B = B.transpose()

    # Now compute B^(-1) * A
    C = solve_system_with_difficult_last_row(B, A)
    return C.change_ring(ZZ)._insert_zero_columns(zero_cols)
Example #18
    def by_taylor_expansion(self, fs, k) :
        r"""
        We combine the theta decomposition and the heat operator as in the
        thesis of Nils Skoruppa. This yields a bijection of the space of weak
        Jacobi forms of weight `k` and index `m` with the product of spaces
        of elliptic modular forms `M_k \times S_{k+2} \times \cdots \times S_{k+2m}`.

        INPUT:
        
        - ``fs`` -- A list of functions that, given an integer `p`, return the
                    q-expansion of a modular form with rational coefficients
                    up to precision `p`.  These modular forms correspond to
                    the components of the above product.
        
        - `k` -- An integer. The weight of the weak Jacobi form to be computed.
        
        NOTE:

            In order to make ``phi_divs`` integral we introduce an extra factor
            `2^{\mathrm{index}} * \mathrm{factorial}(k + 2 * \mathrm{index} - 1)`.
        """
        ## we introduce some abbreviations
        p = self.__p
        PS = self.power_series_ring()
            
        if not len(fs) == self.__precision.jacobi_index() + 1 :
            raise ValueError( "fs (which has length {0}) must be a list of {1} Fourier expansions" \
                              .format(len(fs), self.__precision.jacobi_index() + 1) )

        qexp_prec = self._qexp_precision()
        if qexp_prec is None : # there are no forms below the precision
            return dict()
        
        f_divs = dict()
        for (i, f) in enumerate(fs) :
            f_divs[(i, 0)] = PS(f(qexp_prec), qexp_prec)
                
        for i in xrange(self.__precision.jacobi_index() + 1) :
            for j in xrange(1, self.__precision.jacobi_index() - i + 1) :
                f_divs[(i,j)] = f_divs[(i, j - 1)].derivative().shift(1)
            
        phi_divs = list()
        for i in xrange(self.__precision.jacobi_index() + 1) :
            ## This is the formula in Skoruppa's thesis. He uses d/dtau instead of
            ## d/dz, which yields a factor 4m.
            phi_divs.append( sum( f_divs[(j, i - j)] * (4 * self.__precision.jacobi_index())**i
                                  * binomial(i,j) * ( 2**self.index() // 2**i)
                                  * prod(2*(i - l) + 1 for l in xrange(1, i))
                                  * (factorial(k + 2*self.index() - 1) // factorial(i + k + j - 1))
                                  * factorial(2*self.__precision.jacobi_index() + k - 1)
                                  for j in xrange(i + 1) ) )
            
        phi_coeffs = dict()
        for r in xrange(self.index() + 1) :
            series = sum( map(operator.mul, self._theta_factors()[r], phi_divs) )
            series = self._eta_factor() * series

            for n in xrange(qexp_prec) :
                phi_coeffs[(n, r)] = int(series[n].lift()) % p

        return phi_coeffs
Example #19
    def by_taylor_expansion(self, fs, k):
        r"""
        We combine the theta decomposition and the heat operator as in the
        thesis of Nils Skoruppa. This yields a bijection of the space of weak
        Jacobi forms of weight `k` and index `m` with the product of spaces
        of elliptic modular forms `M_k \times S_{k+2} \times \cdots \times S_{k+2m}`.

        INPUT:
        
        - ``fs`` -- A list of functions that, given an integer `p`, return the
                    q-expansion of a modular form with rational coefficients
                    up to precision `p`.  These modular forms correspond to
                    the components of the above product.
        
        - `k` -- An integer. The weight of the weak Jacobi form to be computed.
        
        NOTE:

            In order to make ``phi_divs`` integral we introduce an extra factor
            `2^{\mathrm{index}} * \mathrm{factorial}(k + 2 * \mathrm{index} - 1)`.
        """
        ## we introduce some abbreviations
        p = self.__p
        PS = self.power_series_ring()

        if not len(fs) == self.__precision.jacobi_index() + 1:
            raise ValueError( "fs (which has length {0}) must be a list of {1} Fourier expansions" \
                              .format(len(fs), self.__precision.jacobi_index() + 1) )

        qexp_prec = self._qexp_precision()
        if qexp_prec is None:  # there are no forms below the precision
            return dict()

        f_divs = dict()
        for (i, f) in enumerate(fs):
            f_divs[(i, 0)] = PS(f(qexp_prec), qexp_prec)

        for i in xrange(self.__precision.jacobi_index() + 1):
            for j in xrange(1, self.__precision.jacobi_index() - i + 1):
                f_divs[(i, j)] = f_divs[(i, j - 1)].derivative().shift(1)

        phi_divs = list()
        for i in xrange(self.__precision.jacobi_index() + 1):
            ## This is the formula in Skoruppa's thesis. He uses d/dtau instead of
            ## d/dz, which yields a factor 4m.
            phi_divs.append(
                sum(f_divs[(j, i - j)] *
                    (4 * self.__precision.jacobi_index())**i * binomial(i, j) *
                    (2**self.index() // 2**i) * prod(2 * (i - l) + 1
                                                     for l in xrange(1, i)) *
                    (factorial(k + 2 * self.index() - 1) //
                     factorial(i + k + j - 1)) *
                    factorial(2 * self.__precision.jacobi_index() + k - 1)
                    for j in xrange(i + 1)))

        phi_coeffs = dict()
        for r in xrange(self.index() + 1):
            series = sum(map(operator.mul, self._theta_factors()[r], phi_divs))
            series = self._eta_factor() * series

            for n in xrange(qexp_prec):
                phi_coeffs[(n, r)] = int(series[n].lift()) % p

        return phi_coeffs
Example #20
def saturation(A, proof=True, p=0, max_dets=5):
    """
    Compute a saturation matrix of A.

    INPUT:
        A     -- a matrix over ZZ
        proof -- bool (default: True)
        p     -- int (default: 0); if not 0
                 only guarantees that output is p-saturated
        max_dets -- int (default: 5) max number of dets of
                 submatrices to compute.

    OUTPUT:
        matrix -- saturation of the matrix A.

    EXAMPLES:
        sage: from sage.matrix.matrix_integer_dense_saturation import saturation
        sage: A = matrix(ZZ, 2, 2, [3,2,3,4]); B = matrix(ZZ, 2,3,[1,2,3,4,5,6]); C = A*B
        sage: C
        [11 16 21]
        [19 26 33]
        sage: C.index_in_saturation()
        18
        sage: S = saturation(C); S
        [11 16 21]
        [-2 -3 -4]
        sage: S.index_in_saturation()
        1
        sage: saturation(C, proof=False)
        [11 16 21]
        [-2 -3 -4]
        sage: saturation(C, p=2)
        [11 16 21]
        [-2 -3 -4]
        sage: saturation(C, p=2, max_dets=1)
        [11 16 21]
        [-2 -3 -4]
    """
    # Find a submatrix of full rank and instead saturate that matrix.
    r = A.rank()
    if A.is_square() and r == A.nrows():
        return identity_matrix(ZZ, r)
    if A.nrows() > r:
        P = []
        while len(P) < r:
            P = matrix_integer_dense_hnf.probable_pivot_rows(A)
        A = A.matrix_from_rows(P)

    # Factor out all common factors from all rows, just in case.
    A = copy(A)
    A._factor_out_common_factors_from_each_row()

    if A.nrows() <= 1:
        return A

    A, zero_cols = A._delete_zero_columns()

    if max_dets > 0:
        # Take the GCD of at most max_dets randomly chosen determinants.
        nr = A.nrows()
        nc = A.ncols()
        d = 0
        trials = min(binomial(nc, nr), max_dets)
        already_tried = []
        while len(already_tried) < trials:
            v = random_sublist_of_size(nc, nr)
            tm = verbose('saturation -- checking det condition on submatrix')
            d = gcd(d, A.matrix_from_columns(v).determinant(proof=proof))
            verbose('saturation -- got det down to %s' % d, tm)
            if gcd(d, p) == 1:
                return A._insert_zero_columns(zero_cols)
            already_tried.append(v)

        if gcd(d, p) == 1:
            # already p-saturated
            return A._insert_zero_columns(zero_cols)

        # Factor and p-saturate at each p.
        # This is not a good algorithm, because all the HNF's in it are really slow!
        #
        #tm = verbose('factoring gcd %s of determinants'%d)
        #limit = 2**31-1
        #F = d.factor(limit = limit)
        #D = [p for p, e in F if p <= limit]
        #B = [n for n, e in F if n > limit]  # all big factors -- there will only be at most one
        #assert len(B) <= 1
        #C = B[0]
        #for p in D:
        #    A = p_saturation(A, p=p, proof=proof)

    # This is a really simple but powerful algorithm.
    # FACT: If A is a matrix of full rank, then hnf(transpose(A))^(-1)*A is a saturation of A.
    # To make this practical we use solve_system_with_difficult_last_row, since the
    # last column of an HNF is typically the only really big one.
    B = A.transpose().hermite_form(include_zero_rows=False, proof=proof)
    B = B.transpose()

    # Now compute B^(-1) * A
    C = solve_system_with_difficult_last_row(B, A)
    return C.change_ring(ZZ)._insert_zero_columns(zero_cols)