Example #1
def idsvp_exact(ideal, b_prec=fp.BIT_PREC_DEFAULT, approx_bkz=False):
    _K       = ideal.number_field();
    _r1, _r2 = _K.signature();

    # Embed the ideal lattice in Minkowski space.
    # minkowski_embedding(x1,x2,...) returns M = ( S(x1) | S(x2) | ... ) in columns, hence the transposition.
    _id_ZB   = ideal.basis();
    _id_L    = Matrix(RealField(b_prec), _K.minkowski_embedding(_id_ZB, prec=2*b_prec).transpose());
    if (not approx_bkz):
        _s_L     = svp_exact(_id_L);
    else:
        _id_bkz, _ = bkz(_id_L, block_size=40, bkzmaxloops=300);
        _s_L       = _id_bkz[0];
    
    # Going back... is just inverting the Minkowski matrix (on the Z-basis of ideal)
    _s_ZB    = _s_L*rc_mat_inverse(_id_L);
    #_s_ZB      = _id_L.solve_left(_s_L, check=False); # (check=True) works only for exact rings
    _dist_s_ZZ = [ _coef - round(_coef) for _coef in _s_ZB ];
    assert (fp.fp_check_zero("_s_ZB-ZZ", _dist_s_ZZ, target=b_prec, sloppy=True)); # /!\ Sloppy

    _s       = sum(round(_s_ZB[_k])*_id_ZB[_k] for _k in range(len(_id_ZB)));
    assert (_s in ideal), "[Err] Minkowski pre-image is not in the target ideal (pre-image space)";
    assert (fp.fp_check_zero("T2(x)-l2(MK.x)", [_s_L.norm()-t2_norm(_s, b_prec=b_prec)], target=b_prec, sloppy=True)); # /!\ Sloppy
    
    return _s;
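
A hedged usage sketch (hypothetical; assumes the surrounding module's helpers fp, svp_exact, bkz, rc_mat_inverse and t2_norm are in scope of a Sage session):

# Hypothetical usage: (near-)shortest vector of an ideal in a small cyclotomic field.
x = polygen(QQ)
K = NumberField(x^4 + 1, 'z')            # 8th cyclotomic field, signature (0, 2)
a = K.prime_above(17)                    # target ideal
s = idsvp_exact(a, approx_bkz=True)      # BKZ reduction instead of exact SVP
assert s in a                            # s is a short element of a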
Example #2
def get_twfHcE_matrix(r1, r2, fb, method, b_prec=fp.BIT_PREC_DEFAULT):
    assert (method in __METHOD_TYPE)

    _n = r1 + 2 * r2
    _nu = r1 + r2 - 1
    _k = len(fb)

    # Always project on H or H0
    _span_H = (_n + _k) if (method == 'TW') else (_n)
    _pH = get_projection_H(_n + _k, _span_H, b_prec=b_prec)
    assert (fp.fp_check_zero("PrH(1)",
                             (vector([1] * (_span_H) + [0] *
                                     (_n + _k - _span_H)) *
                              _pH).coefficients(),
                             target=b_prec))

    # Scaling by c (done first because of the 'NONE' iso type)
    _c = twphs_get_c(_n, fb, method, b_prec=b_prec)
    _c_Id = block_diagonal_matrix(_c * identity_matrix(_n),
                                  identity_matrix(len(fb)))
    assert (_c_Id.base_ring().precision() >= b_prec)

    # Full-rank strategy
    if (method == 'NONE'):  # Do nothing.
        _fH = matrix(RealField(b_prec), identity_matrix(_n + _k))
    elif (method == 'PHS'):  # Prune r2 coordinates + the last remaining one
        _fH = block_diagonal_matrix(
            [get_ext_pruning(r1, r2),
             identity_matrix(_k)], subdivide=False)
        _fH = _fH.change_ring(RealField(b_prec))
    elif (method == 'OPT'):  # MK-isometry on _n first
        _fH = block_diagonal_matrix(
            [get_minkH(r1, r2, 0, b_prec=b_prec),
             identity_matrix(_k)],
            subdivide=False)
        assert (fp.fp_check_zero("Vol(fH)==1",
                                 [vol(_fH.transpose()) - RealField(b_prec)(1)],
                                 target=b_prec,
                                 sloppy=True))
        # /!\ Sloppy
    elif (method == 'TW'):  # MK-isometry on _n+_k (whole space)
        _fH = get_minkH(r1, r2, _k, b_prec=b_prec)
        assert (fp.fp_check_zero("Vol(fH)==1",
                                 [vol(_fH.transpose()) - RealField(b_prec)(1)],
                                 target=b_prec,
                                 sloppy=True))
        # /!\ Sloppy

    # Finally, proj -> scaling -> full-rank
    _fHcE = (_pH * _c_Id * _fH)
    assert (_fH.base_ring().precision() >= b_prec)
    assert ((_fHcE.nrows() == _n + _k) and (_fHcE.ncols() == _nu + _k))
    assert (_fHcE.base_ring().precision() >= b_prec)

    return _fHcE
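
A hedged usage sketch (hypothetical; the module helpers __METHOD_TYPE, get_projection_H, twphs_get_c, get_minkH, etc. must be in scope, and method is one of 'NONE', 'PHS', 'OPT', 'TW'):

# Hypothetical usage: build the (n+k) x (nu+k) embedding matrix for 'TW'.
x = polygen(QQ)
K = NumberField(x^4 + 1, 'z')
r1, r2 = K.signature()                    # (0, 2), so n = 4, nu = 1
fb = [K.prime_above(p) for p in (17, 41)] # toy factor base, k = 2
fHcE = get_twfHcE_matrix(r1, r2, fb, 'TW', b_prec=200)
print(fHcE.dimensions())                  # (6, 3): (n+k) x (nu+k)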
Example #3
def gram_schmidt_ortho(M, normalize=False):
    _R = M.base_ring().fraction_field()
    _n = M.nrows()

    _G = M.change_ring(_R)
    _P = identity_matrix(_R, _n)

    # Main loop
    # NB: Exchanging the _i and _j loops would lead to a somewhat "order-independent" algorithm,
    #     allowing one to choose the smallest-length projection for the next step:
    #    for _i in range(1,_n):
    #        _G[_i] = _G[_i] - sum( (_G[_i]*_G[_j])/_G[_j].norm()**2 * _G[_j] for _j in range(_i));
    for _i in range(1, _n):
        _mu_i = [(_G[_i] * _G[_j]) / _G[_j].norm()**2
                 for _j in range(_i)] + [1] + [0] * (_n - _i - 1)
        _G[_i] = _G[_i] - sum(_mu_i[_j] * _G[_j] for _j in range(_i))
        _P[_i] = vector(_R, _mu_i)

    # Orthonormalization (off by default)
    if normalize:
        for _i in range(_n):
            _norm_i = _G[_i].norm()
            _P[:, _i] *= _norm_i
            _G[_i] /= _norm_i

    assert (_G.base_ring() == M.base_ring().fraction_field())
    assert (fp.fp_check_zero("M-PG", (M - _P * _G).coefficients(),
                             target=_R.precision()))
    # **Warn** This assertion is not free.
    return _G, _P
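
A quick self-contained check (only the module's fp helper is assumed in scope); the decomposition satisfies M = P*G with P unit lower-triangular and the rows of G pairwise orthogonal:

# Toy check of gram_schmidt_ortho over a 100-bit RealField.
M = matrix(RealField(100), [[3, 1], [2, 2]])
G, P = gram_schmidt_ortho(M)
print((M - P*G).norm('frob'))   # ~ 0 (up to 100-bit precision)
print(G[0] * G[1])              # ~ 0: rows of G are orthogonal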
Example #4
def extend_inf_places(K, p_inf, to_prec=fp.BIT_PREC_DEFAULT):
    assert(len(p_inf) == get_nb_inf_places(K));
    if (to_prec*__BIT_PREC_INFP_SCALE <= p_inf[0].codomain().precision()):
        return p_inf;
    _new_p_inf = K.places(prec=to_prec*__BIT_PREC_INFP_SCALE);
    assert (fp.fp_check_zero("phi++-phi", [ _new_p_inf[_k](K.gen())-p_inf[_k](K.gen()) for _k in range(len(p_inf)) ], target=p_inf[0].codomain().precision()));
    return _new_p_inf;
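
A hedged usage sketch (hypothetical; get_nb_inf_places and __BIT_PREC_INFP_SCALE are module-level helpers assumed in scope):

# Hypothetical usage: upgrade cached places of K from 53 bits to (at least) 200.
x = polygen(QQ)
K = NumberField(x^3 - 2, 'a')
p_inf = K.places(prec=53)
p_inf = extend_inf_places(K, p_inf, to_prec=200)
print(p_inf[0].codomain().precision())    # >= 200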
Example #5
def __inf_place_in_stream(stream, K, to_prec=0):
    _z_reim  = [sage_eval(_s) for _s in stream.readline().rstrip().split(' ')];

    # Determine precision
    _in_prec = min(_z_reim_p.parent().precision() for _z_reim_p in _z_reim);
    assert (to_prec <= _in_prec);
    _prec   = _in_prec if (to_prec == 0) else to_prec;
    _RC     = RealField(_prec) if (len(_z_reim) == 1) else ComplexField(_prec);

    # Map input strings into RR or CC, verify it is indeed a root
    _z      = _RC(*_z_reim);
    assert (fp.fp_check_zero("K.eq(z)", [K.gen().minpoly()(_z)], target=_RC.precision()));
    
    return K.hom(_z, codomain=_RC, check=False); # Remark: same as the code of K.places()
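
A hedged sketch, calling the module-private function from inside its defining module; the stream holds one embedding per line as whitespace-separated real (and optional imaginary) parts:

# Hypothetical usage: parse one complex place of QQ(i) from a stream.
from io import StringIO
x = polygen(QQ)
K = NumberField(x^2 + 1, 'i')
phi = __inf_place_in_stream(StringIO("0.0 1.0\n"), K)
print(phi(K.gen()))                       # ~ 1.0*I in CC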
Example #6
def twphs_build_solution(cldl, log_s_cvp, BL, u_su):
    _b_prec = log_s_cvp.base_ring().precision()
    # log_s_cvp can come from a BKZ basis with less precision than BL

    # Carefully compute _y \in ZZ^dim st _y*BL = log_s_cvp
    _y_R = BL.solve_left(log_s_cvp, check=False)
    # check=True only works on exact rings
    _y = vector(map(round, _y_R))
    assert (fp.fp_check_zero("y_R-ZZ", (_y_R - _y).coefficients(),
                             target=_b_prec))
    # Should be fine if BL has enough precision.

    # S-unit corresponding to log_s_cvp
    assert (len(u_su) == len(_y))
    # This can be (very) long! Other methods (dividing step by step, ...) seem even longer.
    _s = prod(map(pow, u_su, _y))

    return (cldl / _s)
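
A toy sketch with hypothetical stand-in data; in the real pipeline, cldl comes from the ClDL oracle, BL is the reduced log-S-unit lattice basis, u_su the matching S-units, and log_s_cvp the CVP solver output:

# Stand-in data: BL diagonal, so the CVP "solution" decomposes as 1*row0 + 1*row1.
x = polygen(QQ)
K = NumberField(x^2 - 2, 'a')
R = RealField(100)
BL = matrix(R, [[R(2).log(), 0], [0, R(3).log()]])    # stand-in lattice basis
u_su = [K(2), K(3)]                                   # stand-in S-units
t = vector(R, [R(2).log(), R(3).log()])               # stand-in CVP output
print(twphs_build_solution(K(12), t, BL, u_su))       # 12/(2*3) = 2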
Example #7
def rc_mat_inverse(M):
    _R = M.base_ring()
    # Real or Complex
    _b_prec = _R.precision()
    _RR = _R.to_prec(4 * _b_prec)
    # 4x should be enough; 1x is not sufficient, 2x or 3x might be.

    _iM = ((M.change_ring(_RR)).inverse()).change_ring(_R)

    # Check there is no precision issue (NB: compute the product in high precision)
    _chk_M = (M.change_ring(_RR) *
              _iM.change_ring(_RR)).change_ring(_R) - identity_matrix(
                  M.nrows())
    assert (fp.fp_check_zero("M*iM-In",
                             _chk_M.coefficients(),
                             target=_b_prec,
                             sloppy=True))
    # /!\ Sloppy

    return _iM
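
A quick self-contained check (only the module's fp helper is assumed in scope):

# rc_mat_inverse computes the inverse at 4x precision, then truncates back.
M = matrix(RealField(100), [[1, 2], [3, 4]])
iM = rc_mat_inverse(M)
print((M * iM - identity_matrix(2)).norm('frob'))   # ~ 0 at 100 bits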
Example #8
def twphs_get_target(eta,
                     a,
                     p_inf,
                     fb,
                     method,
                     beta=0.0,
                     b_prec=fp.BIT_PREC_DEFAULT,
                     _pcmp_fhce=0):
    assert (p_inf[0].codomain().precision() >= b_prec)
    _K = fb[0].number_field()
    _n = _K.degree()
    _r1, _r2 = _K.signature()

    # Work prec: enough to handle (rational) coefficients of eta
    #_w_prec = max([max([RealField(1000)(log(_coef.abs())) for _coef in _su.list()]) for _su in un + s_un]); # All coefficients are integers, if s_un is S-units(fb).
    #p_inf   = extend_inf_places(_K, p_inf, to_prec=_w_prec);
    _work_prec = p_inf[0].codomain().precision()
    # The above takes too long; assume p_inf already has enough precision.

    # Log embedding
    _inf_type = 'EXPANDED'
    # s_1, ..., s_{r1}, then each complex embedding doubled: s_{r1+1}, s_{r1+1}, ..., s_{r1+r2}, s_{r1+r2}.
    _fb_type = 'TWISTED' if (method == 'TW') else 'FLAT'
    # 'FLAT' is just -v_p(elt).
    # Log of eta, and compensation for the p|a for p in FB
    _log_eta = log_embedding(eta,
                             p_inf,
                             fb=fb,
                             inf_type=_inf_type,
                             fb_type=_fb_type,
                             b_prec=_work_prec)
    _log_a = log_embedding(a,
                           p_inf,
                           fb=fb,
                           inf_type='NONE',
                           fb_type=_fb_type,
                           b_prec=_work_prec)
    # inf_type='NONE' triggers "Ideal" code -> only FB
    _log_t = _log_eta - _log_a
    assert (_log_t.base_ring().precision() >= _work_prec)
    _log_t = _log_t.change_ring(RealField(b_prec))
    if (_fb_type == 'TWISTED'):  # Otherwise there is nothing we can check
        assert (fp.fp_check_zero("sum(log_t)-N(a)",
                                 [sum(_log_t) - a.norm().log(prec=b_prec)],
                                 target=b_prec))

    # Construct drift vector: lambda_p = beta - ln N(p).
    # Twisted case:
    #     We removed the ln N(p) for now: this brings more complications for "exact_cvp" (infinite loop)
    #     Maybe we should try "max (0, beta - ln N(p))".
    #     This works already well using a constant "- ln N(pmin)" (see twphs_guess_beta('TW')).
    _beta = RealField(b_prec)(beta)
    _drift_v = vector([RealField(b_prec)(0)] * _n + [_beta] * len(fb))

    # Projection wizardry
    if (_pcmp_fhce == 0):
        _log_t = twfHcE(_log_t + _drift_v, _r1, _r2, fb, method, b_prec=b_prec)
    else:
        _log_t = (_log_t + _drift_v) * _pcmp_fhce

    assert (_log_t.base_ring().precision() >= b_prec)
    return _log_t
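
The 'TWISTED' assertion above is an instance of the product formula. A self-contained toy check in plain Sage (K and eta hypothetical; K is totally imaginary, so every EXPANDED archimedean coordinate appears twice):

# Toy check: the EXPANDED archimedean logs of eta sum to ln |N(eta)|.
x = polygen(QQ)
K = NumberField(x^2 + 1, 'a')                # a plays the role of i
eta = K([3, 1])                              # eta = 3 + a, N(eta) = 10
s = sum(2 * abs(phi(eta)).log() for phi in K.places(prec=100))
print(s - RealField(100)(10).log())          # ~ 0 (product formula)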