Example #1
def NonCubicSet(K, S, verbose=False):
    u = -1 if K == QQ else K(K.unit_group().torsion_generator())
    from KSp import IdealGenerator
    Sx = [u] + [IdealGenerator(P) for P in S]
    r = len(Sx)
    d123 = r + binomial(r,2) + binomial(r,3)
    vecP = vec123(K,Sx)
    A = Matrix(GF(2),0,d123)
    N = prod(S,1)

    primes = primes_iter(K,None)
    T = []
    while A.rank() < d123:
        p = next(primes)
        while p.divides(N):
            p = next(primes)
        v = vecP(p)
        if verbose:
            print("v={}".format(v))
        A1 = A.stack(vector(v))
        if A1.rank() > A.rank():
            A = A1
            T.append(p)
            if verbose:
                print("new A={} with {} rows and {} cols".format(A,A.nrows(),A.ncols()))
                print("T increases to {}".format(T))
    return T
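A minimal standalone sketch of the rank-saturation pattern used in the loop above (Sage syntax; saturate_rank, candidates and target_dim are hypothetical names, not from the source):

def saturate_rank(candidates, target_dim):
    # Stack candidate GF(2)-vectors, keeping only those that raise the rank,
    # until the matrix reaches full rank target_dim.
    A = Matrix(GF(2), 0, target_dim)
    kept = []
    for v in candidates:
        A1 = A.stack(vector(GF(2), v))
        if A1.rank() > A.rank():
            A = A1
            kept.append(v)
        if A.rank() == target_dim:
            break
    return kept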
Example #2
 def __init__(self, n, ieqs=None):
     self._n = n
     if ieqs is not None:
         assert ieqs.ncols() == binomial(n,2)
         self._ieqs = ieqs
     else:
         self._ieqs = kalmanson_matrix(n)
Example #3
def MaxVraissLp():
    pspict, fig = SinglePicture("MaxVraissLp")
    pspict.dilatation(
        10
    )  # Moving this further down (just above pspict.conclude()) produces a lovely bug :)

    x = var('x')
    f = phyFunction(binomial(10, 3) * x**3 * (1 - x)**7).graph(0, 1)

    M = f.get_point(0.3)
    Mx = M.projection(pspict.axes.single_axeX)
    l = Segment(M, Mx)
    l.parameters.style = "dotted"

    pspict.axes.single_axeX.put_mark(0.3, -90, "$p$", pspict=pspict)
    pspict.axes.single_axeY.put_mark(0.3,
                                     text="$L(p)$",
                                     pspict=pspict,
                                     position="E")
    pspict.axes.single_axeX.Dx = 0.3
    pspict.axes.single_axeY.Dx = 0.2

    pspict.DrawGraphs(M, f, l)
    pspict.DrawDefaultAxes()
    fig.conclude()
    fig.write_the_file()
Example #4
def make_alpha_beta(n, d, verbose=False):
    if verbose:
        print("in make_alpha_beta(n,d) with n={}, d={}".format(n, d))
    if d < 0 or d > n or n == 1 or (n, d) in alpha_dict:
        if verbose:
            print("nothing to do")
        return
    if verbose:
        print("doing any necessary recursive calls...")
    # make values for smaller n, or same n and smaller d:
    for n1 in range(2, n):
        for d1 in range(1, n1 + 1):
            if verbose:
                print("recursion for (n,d)=({},{})".format(n1, d1))
            make_alpha_beta(n1, d1, verbose)
    for d1 in range(1, d):
        if verbose:
            print("recursion for (n,d)=({},{})".format(n, d1))
        make_alpha_beta(n, d1, verbose)

    if verbose:
        print("... recursive calls done for (n,d)=({},{})".format(n, d))
    A = pp**(-n) * sum([
        alpha_sigma(n, d, sigma) * number_of_monics_with_splitting_type(sigma)
        for sigma in Phi(n) if sigma != [[1, n]]
    ])

    B = (pp - 1) * sum([
        pp**(-binomial(r + 1, 2)) *
        sum([pp**s * alpha(s, d) for s in range(r)]) for r in range(n)
    ])

    D = 1 - pp**(1 - binomial(n + 1, 2))
    assert D
    if verbose:
        print("A = {}".format(A))
        print("B = {}".format(B))
        print("D = {}".format(D))

    a = (A + pp**(1 - n) * B) / D
    b = (pp**(-binomial(n, 2)) * A + B) / D
    if verbose:
        print("(n,d)=({},{}): storing alpha={}, beta={}".format(n, d, a, b))
    alpha_dict[(n, d)] = a
    beta_dict[(n, d)] = b
Example #5
 def A_poset(n):
     from sage.all import binomial
     S = [1, 1, 2, 5, 15, 52, 203]
     while len(S) <= n + 1:
         m = len(S) - 1
         S.append(
             _reduce(lambda x, y: x + y[0] * binomial(m, y[1]),
                     zip(S, range(m + 1)), 0))
     return S[n + 1]
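The seed list [1, 1, 2, 5, 15, 52, 203] and the recurrence S[m+1] = sum_k binomial(m, k) * S[k] are those of the Bell numbers, so A_poset(n) appears to return the (n+1)-st Bell number. A dependency-free sketch of the same recurrence (my reading of the example, using math.comb in place of sage.all.binomial; bell_shifted is a hypothetical name):

from math import comb

def bell_shifted(n):
    # B_{m+1} = sum_{k=0}^{m} C(m, k) * B_k, returning S[n + 1] as above.
    S = [1]
    while len(S) <= n + 1:
        m = len(S) - 1
        S.append(sum(comb(m, k) * S[k] for k in range(m + 1)))
    return S[n + 1]

assert [bell_shifted(i) for i in range(6)] == [1, 2, 5, 15, 52, 203]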
Example #6
 def betti_number(self, p, q):
     """
     Return the Betti number b_{p,q}. Either use general theory to
     determine this number or compute it using some b_l or c_l
     (whichever is optimal).
     """
     p = ZZ(p)
     q = ZZ(q)
     if q == 0 and p == 0:
         return ZZ(1)
     elif q == 1:
         return self._betti_number_row1(p)
     elif q == 2:
         b = self._betti_number_row1(p+1)
         N = self.N
         l = N - 2 - p
         diff = (N-1-l)*binomial(N-1, l-1) - 2*self.volume*binomial(N-3, l-1)
         return b - diff
     else:
         return ZZ(0)
Example #7
    def get_deg_auto(nss, nts=200, max_deg=7):
        if __debug__:
            assert nss >= 1, nss
            assert nts >= nss, (nts, nss)
            assert max_deg >= 1, max_deg

        for d in range(1, max_deg + 1):
            deg = d
            if binomial(nss + deg, deg) - 1 > nts:
                deg = d - 1
                break

        return deg
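The loop selects the largest degree deg (up to max_deg) for which a polynomial template in nss symbols still has at most nts non-constant monomials, since binomial(nss + deg, deg) - 1 counts the monomials of degree at most deg. A quick standalone check of that reading (uses math.comb rather than the Sage binomial):

from math import comb

# For nss = 5, nts = 200: comb(9, 4) - 1 = 125 <= 200 < comb(10, 5) - 1 = 251,
# so the auto-selected degree should be 4.
nss, nts = 5, 200
degs = [d for d in range(1, 8) if comb(nss + d, d) - 1 <= nts]
assert max(degs) == 4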
Example #9
    def _betti_number_row1(self, p):
        if p <= 0 or p > self.N - 3:
            return ZZ(0)

        N = self.N
        l = N - 1 - p

        if p <= self.hering_schenck_bound:
            return (N-1-l)*binomial(N-1, l-1) - 2*self.volume*binomial(N-3, l-1)

        # Compute via b or c?
        B = self.b_computer(p)
        C = self.c_computer(l)

        bdiff = (B.difficulty(), B.pq)
        cdiff = (C.difficulty(), C.pq)

        if bdiff < cdiff:
            B0 = self.b_computer_im(p)
            return B.ker() - B0.dom()
        else:
            diff = (N-1-l)*binomial(N-1, l-1) - 2*self.volume*binomial(N-3, l-1)
            return C.ker() + diff
Example #10
def kappa_coeff(sigma, kappa_0, F):
    # maybe not optimal to compute the target_partition twice here...
    # global kappa_coeff_dict
    mmm = F.degree()
    target_partition = []
    for i in range(1, mmm + 1):
        for j in range(F[i]):
            target_partition.append(i)
    # key = (tuple(sigma), kappa_0, tuple(target_partition))
    # if key in kappa_coeff_dict:
    #     return kappa_coeff_dict[key]
    total = 0
    num_ones = sum(1 for i in sigma if i == 1)
    for i in range(num_ones + 1):
        for injection in Permutations(list(range(len(target_partition))), len(sigma) - i):
            term = binomial(num_ones, i) * binomial(kappa_0 + len(target_partition) + i - 1, i) * factorial(i)
            for j in range(len(sigma) - i):
                term *= C_coeff(sigma[j + i], target_partition[injection[j]])
            for j in range(len(target_partition)):
                if j in injection:
                    continue
                term *= C_coeff(0, target_partition[j])
            total += term
    return (-1)**(len(target_partition) + len(sigma)) * total * Rational((1, aut(target_partition)))
Example #11
 def pair_two_pols(self,p1,p2):
     res = 0
     cp1 = p1.coefficients(sparse=False)
     cp2 = p2.coefficients(sparse=False)
     for n in range(self._w+1):
         if n < len(cp1):
             c1 = cp1[n]
         else:
             c1 = 0
         k = self._w - n
         if k < len(cp2):
             c2 = cp2[k]
         else:
             c2 = 0
         term = c1*conjugate(c2)*(-1)**(self._w-n)/binomial(self._w,n)
         res = res + term
     return res/self._dim
Example #13
 def _get_polynomial(self, n):
     r"""
     Compute P(f|A_n)(X)
     """
     if n in self._polynomials:
         return self._polynomials[n]
     CF = ComplexField(self._prec)
     X = CF['X'].gens()[0]
     k = self._k
     pols = {}
     ## For each component we get a polynomial
     p = X.parent().zero()
     for l in range(self._w + 1):
         rk = self.get_rk(n, self._w - l)
         if self._verbose > 0:
             print("rk =", rk)
         fak = binomial(self._w, l) * (-1)**l
         p += CF(fak) * CF(rk) * X**l
     self._polynomials[n] = p
     return p
Example #15
 def compute_estimate(p):
     # n, k, tau, T, P and q are free variables from the enclosing scope.
     iters = 1. * binomial(n, k) / sum(
         binomial(n - tau, k - i) * binomial(tau, i) for i in range(p + 1))
     estimate = iters * (T + sum(
         P[pi] * (q - 1)**pi * binomial(k, pi) for pi in range(p + 1)))
     return estimate
Example #16
def tbin(a, b):
    return (binomial(a - b + 1, b) - binomial(a - b - 1, b - 2)) * (-1)**b
Example #17
def hybrid_decoding_attack(n,
                           alpha,
                           q,
                           m,
                           secret_distribution,
                           beta,
                           tau=None,
                           mitm=True,
                           reduction_cost_model=est.BKZ.sieve):
    """
    Estimate the cost of the hybrid attack.

    :param n: LWE dimension `n > 0`
    :param alpha: noise rate `0 ≤ α < 1`, noise will have standard deviation `αq/sqrt{2π}`
    :param q: modulus `0 < q`
    :param m: number of LWE samples `m > 0`
    :param secret_distribution: distribution of secret
    :param beta: BKZ block size β
    :param tau: guessing dimension τ
    :param mitm: simulate MITM approach (√ of search space)
    :param reduction_cost_model: BKZ reduction cost model

    EXAMPLE:

    hybrid_decoding_attack(beta = 100, tau = 250, mitm = True, reduction_cost_model = est.BKZ.sieve, **example_64())

         rop:   2^65.1
         pre:   2^64.8
        enum:   2^62.5
        beta:      100
         |S|:   2^73.1
        prob: 0.104533
       scale:   12.760
          pp:       11
           d:     1798
      repeat:       42

    """

    n, alpha, q = est.Param.preprocess(n, alpha, q)

    # d is the dimension of the attack lattice; tau = None means no guessing
    if tau is None:
        tau = 0
    d = m + n - tau

    # h is the Hamming weight of the secret
    # NOTE: binary secrets are assumed to have Hamming weight ~n/2, ternary secrets ~2n/3
    # this aligns with the assumptions made in the LWE Estimator
    h = est.SDis.nonzero(secret_distribution, n=n)
    sd = alpha * q / sqrt(2 * pi)

    # compute the scaling factor used in the primal lattice to balance the secret and error
    scale = est._primal_scale_factor(secret_distribution,
                                     alpha=alpha,
                                     q=q,
                                     n=n)

    # 1. get squared-GSO lengths via the Geometric Series Assumption
    # we could also consider using the BKZ simulator, using the GSA is conservative
    r = sq_GSO(d, beta, q**m * scale**(n - tau))

    # 2. Costs
    bkz_cost = est.lattice_reduction_cost(reduction_cost_model,
                                          est.delta_0f(beta), d)
    enm_cost = est.Cost()
    enm_cost["rop"] = d**2 / (2**1.06)

    # 3. Size of search space
    # We need to do at least one BDD call
    search_space, prob, hw = ZZ(1), 1.0, 0

    # if mitm is True, sqrt speedup in the guessing phase. This allows us to square the size
    # of the search space at no extra cost.
    # NOTE: we conservatively assume that this mitm process succeeds with probability 1.
    ssf = sqrt if mitm else lambda x: x

    # use the secret distribution bounds to determine the size of the search space
    a, b = est.SDis.bounds(secret_distribution)

    # perform "searching". This part of the code balances the enm_cost with the cost of lattice
    # reduction, where enm_cost is the total cost of calling Babai's algorithm on each vector in
    # the search space.

    if tau:
        prob = est.success_probability_drop(n, h, tau)
        hw = 1
        while hw < h and hw < tau:
            prob += est.success_probability_drop(n, h, tau, fail=hw)
            search_space += binomial(tau, hw) * (b - a)**hw

            if enm_cost.repeat(ssf(search_space))["rop"] > bkz_cost["rop"]:
                # we moved too far, so undo
                prob -= est.success_probability_drop(n, h, tau, fail=hw)
                search_space -= binomial(tau, hw) * (b - a)**hw
                hw -= 1
                break
            hw += 1

        enm_cost = enm_cost.repeat(ssf(search_space))

    # we use the expectation of the target norm. This could be longer, or shorter, for any given instance.
    target_norm = sqrt(m * sd**2 + h * RR((n - tau) / n) * scale**2)

    # account for the success probability of Babai's algorithm
    prob *= babai_probability_wun16(r, target_norm)

    # create a cost string, as in the LWE Estimator, to store the attack parameters and costs
    ret = est.Cost()
    ret["rop"] = bkz_cost["rop"] + enm_cost["rop"]
    ret["pre"] = bkz_cost["rop"]
    ret["enum"] = enm_cost["rop"]
    ret["beta"] = beta
    ret["|S|"] = search_space
    ret["prob"] = prob
    ret["scale"] = scale
    ret["pp"] = hw
    ret["d"] = d
    ret["tau"] = tau

    # 4. Repeat whole experiment ~1/prob times
    ret = ret.repeat(est.amplify(0.99, prob),
                     select={
                         "rop": True,
                         "pre": True,
                         "enum": True,
                         "beta": False,
                         "d": False,
                         "|S|": False,
                         "scale": False,
                         "prob": False,
                         "pp": False,
                         "tau": False
                     })

    return ret
Example #18
def distance_matrix(n):
    "Make a stock distance matrix."
    M = np.zeros([n, n], dtype=int)
    M[np.triu_indices(n, 1)] = np.arange(binomial(n, 2))
    M += M.T
    return matrix(M)
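For illustration, a pure-numpy rendering of the same construction (without the Sage matrix wrapper): the strict upper triangle is filled with 0, 1, ..., binomial(n, 2) - 1 row by row, then mirrored.

import numpy as np

def distance_matrix_np(n):
    # Same construction as above, numpy only.
    M = np.zeros([n, n], dtype=int)
    M[np.triu_indices(n, 1)] = np.arange(n * (n - 1) // 2)
    return M + M.T

print(distance_matrix_np(4))
# [[0 0 1 2]
#  [0 0 3 4]
#  [1 3 0 5]
#  [2 4 5 0]]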
Example #19
def beta_star(n, r):
    return sum(
        [m1pow(d - r) * binomial(d, r) * beta(n, d) for d in range(n + 1)])
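Assuming m1pow(j) returns (-1)**j, beta_star is the alternating binomial transform of d -> beta(n, d), whose inverse is beta(n, r) = sum_d binomial(d, r) * beta_star(n, d); rho_star in Example #23 below has the same shape. A standalone check of the inversion with stand-in values (m1pow and the sample beta here are my assumptions, not from the source):

from math import comb

def m1pow(j):
    # assumed helper: (-1)**j
    return -1 if j % 2 else 1

def beta_star_check(n, r, beta):
    # alternating-binomial transform of d -> beta(n, d), as in the example
    return sum(m1pow(d - r) * comb(d, r) * beta(n, d) for d in range(n + 1))

beta = lambda n, d: (d + 1)**2   # arbitrary stand-in values
n = 5
for r in range(n + 1):
    assert sum(comb(d, r) * beta_star_check(n, d, beta) for d in range(n + 1)) == beta(n, r)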
Example #22
def __binomial(a, b):
    return sg.binomial(a, b)
Example #23
def rho_star(n, r):
    return sum(
        [m1pow(d - r) * binomial(d, r) * rho(n, d) for d in range(n + 1)])
Example #24
def proba(n, p, k):
    return binomial(n, k)*p**k*(1-p)**(n-k)
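proba is the binomial probability mass function P(X = k) for X ~ Bin(n, p); a quick standalone sanity check (using math.comb in place of the Sage binomial):

from math import comb, isclose

def proba(n, p, k):
    # P(X = k) for X ~ Bin(n, p)
    return comb(n, k) * p**k * (1 - p)**(n - k)

# The probabilities over k = 0..n sum to 1.
assert isclose(sum(proba(10, 0.3, k) for k in range(11)), 1.0)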
Example #25
def compute_frob_matrix_and_cp_H2(f, p, prec, **kwargs):
    """
    Return a p-adic matrix approximating the action of Frob on H^2 of a surface or abelian variety,
    and its characteristic polynomial over ZZ
    Input:
        - f defining the curve or surface
        - p, prime
        - prec, a lower bound for the desired precision to run the computations, this increases the time exponentially
        - kwargs, keyword arguments to be passed to controlledreduction

    Output:
        - `prec`, the minimum digits absolute precision for approximation of the Frobenius
        - a matrix representing an approximation of Frob matrix with at least `prec` digits of absolute precision
        - characteristic polynomial of Frob on H^2
        - the number of classes omitted by working with primitive cohomology

    Note: if given two or one univariate polynomial, we will try to change the model over Qpbar,
    in order to work with an odd and monic model
    """
    K = Qp(p, prec=prec + 10)
    OK = ZpCA(p, prec=prec)
    Rf = f.parent()
    R = f.base_ring()
    if len(Rf.gens()) == 2:
        if min(f.degrees()) != 2:
            raise NotImplementedError("Affine curves must be hyperelliptic")
        x, y = f.variables()
        if f.degree(x) == 2:
            f = f.substitute(x=y, y=x)
        # Get Weierstrass equation
        # y^2 + a*y  + b == 0
        b, a, _ = map(R['x'], R['x']['y'](f).monic())
        # y^2 + a*y  + b == 0 --> (2y + a)^2 = a^2 - 4 b
        f = a**2 - 4 * b
        f = find_monic_and_odd_model(f.change_ring(K), p)
        cp1 = HyperellipticCurve(f.change_ring(
            GF(p))).frobenius_polynomial().reverse()
        F1 = hypellfrob(p, max(3, prec), f.lift())
        F1 = F1.change_ring(OK)
        cp, frob_matrix = from_H1_to_H2(cp1,
                                        F1,
                                        tensor=kwargs.get('tensor', False))
        frob_matrix = frob_matrix.change_ring(K)
        shift = 0
    elif len(Rf.gens()) == 3 and f.total_degree() == 4 and f.is_homogeneous():
        # Quartic plane curve
        if p < 17:
            prec = max(4, prec)
        else:
            prec = max(3, prec)
        if 'find_better_model' in kwargs:
            model = kwargs['find_better_model']
        else:
            # finding a better model gives a speed-up, but we may lose some numerical stability coming from the original sparseness
            model = binomial(2 + (prec - 1) * f.total_degree(),
                             2) < 2 * len(list(f**(prec - 1)))
        cp1, F1 = controlledreduction(
            f,
            p,
            min_abs_precision=prec,
            frob_matrix=True,
            threads=1,
            find_better_model=model,
        )
        # change ring to OK truncates precision accordingly
        F1 = F1.change_ring(OK)
        cp, frob_matrix = from_H1_to_H2(cp1,
                                        F1,
                                        tensor=kwargs.get('tensor', False))
        shift = 0
    elif len(Rf.gens()) == 4 and f.total_degree() in [4, 5] and f.is_homogeneous():
        shift = 1
        # we will not see the polarization
        # Quartic surface
        if f.total_degree() == 4:
            if p == 3:
                prec = max(5, prec)
            elif p == 5:
                prec = max(4, prec)
            elif p < 43:
                prec = max(3, prec)
            else:
                prec = max(2, prec)
        elif f.total_degree() == 5:
            if p in [3, 5]:
                prec = max(7, prec)
            elif p <= 23:
                prec = max(6, prec)
            else:
                prec = max(5, prec)
        OK = ZpCA(p, prec=prec)
        # a rough estimate of whether it is worth finding a non-degenerate model for f
        if 'find_better_model' in kwargs:
            model = kwargs['find_better_model']
        else:
            # finding a better model gives a speed-up, but we may lose some numerical stability coming from the original sparseness
            model = binomial(3 + (prec - 1) * f.total_degree(),
                             3) < 4 * len(list(f**(prec - 1)))
        threads = kwargs.get('threads', ncpus)
        cp, frob_matrix = controlledreduction(f,
                                              p,
                                              min_abs_precision=prec,
                                              frob_matrix=True,
                                              find_better_model=model,
                                              threads=threads)
        frob_matrix = frob_matrix.change_ring(OK).change_ring(K)
    else:
        raise NotImplementedError("At the moment we only support:\n"
                                  " - Quartic or quintic surfaces\n"
                                  " - Jacobians of quartic curves\n"
                                  " - Jacobians of hyperelliptic curves\n")
    return prec, cp, frob_matrix, shift
Example #26
def make_cone(p):
    from kalmanson import upper_triangle
    return Cone(map(upper_triangle, p), ZZ**binomial(p[0].ncols(), 2))
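A hedged reading of the ambient space: each matrix in p is symmetric, upper_triangle flattens its strict upper triangle to a vector of length binomial(n, 2), and the cone lives in that lattice. A standalone sketch of that flattening (upper_triangle_vec is a hypothetical stand-in for kalmanson.upper_triangle):

import numpy as np

def upper_triangle_vec(M):
    # Flatten the strict upper triangle of an n x n symmetric matrix,
    # row by row, into a vector of length binomial(n, 2).
    n = M.shape[0]
    return M[np.triu_indices(n, 1)]

M = np.array([[0, 1, 2], [1, 0, 3], [2, 3, 0]])
print(upper_triangle_vec(M))   # [1 2 3]; length binomial(3, 2) = 3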