def worker_process(seed, params, queue=None):
    """
    This function is called to collect statistics.

    :param seed: random seed used to sample the basis
    :param params: BKZ parameters
    :param queue: queue used for communication
    """
    FPLLL.set_random_seed(seed)
    A = IntegerMatrix.random(params.block_size, "qary", bits=30,
                             k=params.block_size // 2, int_type="long")

    M = GSO.Mat(A)
    bkz = CallbackBKZ(M)  # suppresses initial LLL call
    tracer = BKZTreeTracer(bkz, start_clocks=True)

    with tracer.context(("tour", 0)):
        bkz.svp_reduction(0, params.block_size, params, tracer)
        M.update_gso()

    tracer.exit()
    try:  # close connection
        params.strategies[params.block_size].connection.send(None)
    except AttributeError:
        pass
    if queue:
        queue.put(tracer.trace)
    else:
        return tracer.trace
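
# Usage sketch (not part of the original source): dispatching ``worker_process``
# across processes via a queue.  ``params`` is assumed to be a BKZ parameter
# object with ``block_size`` and ``strategies`` set, e.g. a ``BKZ.Param``.
from multiprocessing import Process, Queue

def run_workers(params, samples=4):
    queue = Queue()
    processes = [Process(target=worker_process, args=(seed, params, queue))
                 for seed in range(samples)]
    for process in processes:
        process.start()
    traces = [queue.get() for _ in processes]  # one trace per worker
    for process in processes:
        process.join()
    return traces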
def svp_time(seed, params, return_queue=None):
    """Run SVP reduction of AutoBKZ on a freshly sampled q-ary basis using ``params``.

    :param seed: random seed used to sample the basis
    :param params: AutoBKZ parameters
    :param return_queue: if not ``None``, the result is put on this queue.
    """
    FPLLL.set_random_seed(seed)
    FPLLL.set_threads(params["threads"])
    q = 33554393
    k = params.block_size // 2
    A = IntegerMatrix.random(params.block_size, "qary", q=q, k=k, int_type="long")
    M = GSO.Mat(A)
    bkz = BKZ2(M)
    tracer = BKZTreeTracer(bkz, start_clocks=True)

    with tracer.context(("tour", 0)):
        bkz.svp_reduction(0, params.block_size, params, tracer)
        bkz.M.update_gso()

    tracer.exit()

    log_delta = (log(A[0].norm()) - log(q) * (k / float(params.block_size))) / float(params.block_size)
    tracer.trace.data["delta"] = exp(log_delta)

    if return_queue:
        return_queue.put(tracer.trace)
    else:
        return tracer.trace
def sample_matrix(d, lattice_type="qary", seed=None):
    """
    Sample a matrix in dimension `d`.

    :param d: lattice dimension
    :param lattice_type: see module level documentation
    :param seed: optional random seed
    :returns: LLL-reduced integer matrix

    .. note :: This function seeds the FPLLL RNG, i.e. it is deterministic.
    """
    if seed is None:
        FPLLL.set_random_seed(d)
    else:
        FPLLL.set_random_seed(seed)

    if lattice_type == "qary":
        A = IntegerMatrix.random(d, "qary", bits=30, k=d // 2, int_type="long")
    elif lattice_type == "qary-lv":
        A = IntegerMatrix.random(d, "qary", bits=10 * d, k=d // 2)
    else:
        raise ValueError("Lattice type '%s' not supported." % lattice_type)

    A = LLL.reduction(A)
    return A
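
# Usage sketch (not part of the original source): ``sample_matrix`` seeds the
# FPLLL RNG with the dimension by default, so repeated calls with the same
# arguments return the same LLL-reduced basis; dimension 60 is arbitrary.
A = sample_matrix(60)
B = sample_matrix(60)
assert all(A[i, j] == B[i, j] for i in range(60) for j in range(60))
C = sample_matrix(60, seed=0x1337)  # an explicit seed overrides the default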
def load_matrix_file(filepath, randomize=False, seed=None):
    """
    Load matrix from file, LLL reduce (and randomize).

    :param filepath: Load matrix from this file
    :param randomize: Randomize the basis
    :param seed: Seed for randomization
    :returns: lattice basis and BKZ object
    """
    A = IntegerMatrix.from_file(filepath)
    A = LLL.reduction(A)
    A = IntegerMatrix.from_matrix(A, int_type="long")

    M = GSO.Mat(A, float_type="double", flags=GSO.ROW_EXPO)
    bkz = BKZReduction(M)

    if seed is not None:
        FPLLL.set_random_seed(seed)

    if randomize:
        bkz.randomize_block(0, A.nrows, density=A.ncols / 4)
        LLL.reduction(A)
        bkz = BKZReduction(A)

    LLL.reduction(A)
    bkz.lll_obj()  # to initialize bkz.M etc

    return A, bkz
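
# Usage sketch (not part of the original source): the filename below is
# hypothetical; any basis in fplll's text format works.  With ``randomize=True``
# the basis is rerandomized deterministically via the given seed.
A, bkz = load_matrix_file("svpchallenge-80.txt", randomize=True, seed=0xdeadbeef)
print(A.nrows, bkz.M.get_r(0, 0))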
def test_callback_enum(d=40):
    FPLLL.set_random_seed(0x1337)
    A = LLL.reduction(IntegerMatrix.random(100, "qary", k=50, q=7681))
    M = GSO.Mat(A)
    M.update_gso()

    # we are not imposing a constraint
    enum_obj = Enumeration(M)
    solutions = enum_obj.enumerate(0, d, 0.99 * M.get_r(0, 0), 0)
    max_dist, sol = solutions[0]
    assert A.multiply_left(sol)[0] != 2

    # now we do
    def callback(new_sol_coord):
        if A.multiply_left(new_sol_coord)[0] == 2:
            return True
        else:
            return False

    enum_obj = Enumeration(M, callbackf=callback)
    solutions = enum_obj.enumerate(0, d, 0.99 * M.get_r(0, 0), 0)
    max_dist, sol = solutions[0]
    assert A.multiply_left(sol)[0] == 2
def cost_kernel(arg0, d=None, c=None, preproc=None, strategies=None, costs=None, opts=None):
    """
    Compute pruning coefficients after preprocessing and return estimated cost.

    :param arg0: either a tuple containing all arguments or r (squared Gram-Schmidt vectors)
    :param d: enumeration dimension
    :param c: overshoot parameter
    :param preproc: preprocessing parameters
    :param strategies: reduction strategies
    :param costs: precomputed costs for smaller dimensions
    :param opts: passed through to `enumeration_cost`
    :returns: cost and strategy

    .. note :: the unusual arrangement with ``arg0`` is to support ``Pool.map`` which only
        supports one input parameter.
    """
    if preproc is None and c is None and strategies is None and costs is None and opts is None:
        r, d, c, preproc, strategies, costs, opts = arg0
    else:
        r = arg0

    float_type = opts["float_type"]
    if isinstance(float_type, int):
        FPLLL.set_precision(float_type)
        opts["float_type"] = "mpfr"

    try:
        return enumeration_cost(r, d, c, preproc, strategies, costs, **opts)
    except RuntimeError:
        return None, None
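
# Usage sketch (not part of the original source): the single-tuple calling
# convention lets ``cost_kernel`` be driven by ``Pool.map``, which only passes
# one argument per call.  ``jobs`` is a list of
# (r, d, c, preproc, strategies, costs, opts) tuples prepared elsewhere.
from multiprocessing import Pool

def cost_many(jobs, threads=4):
    with Pool(threads) as pool:
        return pool.map(cost_kernel, jobs)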
def svp_time(seed, params, return_queue=None):
    """Run SVP reduction of AutoBKZ on a freshly sampled q-ary basis using ``params``.

    :param seed: random seed used to sample the basis
    :param params: AutoBKZ parameters
    :param return_queue: if not ``None``, the result is put on this queue.
    """
    FPLLL.set_random_seed(seed)
    A = IntegerMatrix.random(params.block_size, "qary", bits=30,
                             k=params.block_size // 2, int_type="long")
    M = GSO.Mat(A)
    bkz = BKZ2(M)
    tracer = BKZTreeTracer(bkz, start_clocks=True)

    with tracer.context(("tour", 0)):
        bkz.svp_reduction(0, params.block_size, params, tracer)
        bkz.M.update_gso()

    tracer.exit()
    tracer.trace.data["|A_0|"] = A[0].norm()

    if return_queue:
        return_queue.put(tracer.trace)
    else:
        return tracer.trace
def test_bkz_init():
    for cls in (SimpleBKZ, SimpleDualBKZ, BKZ, BKZ2):
        for n in dimensions:
            FPLLL.set_random_seed(2**10 + n)
            A = make_integer_matrix(n)
            B = cls(copy(A))
            del B
def test_randomize():
    FPLLL.set_random_seed(1337)
    A0 = make_integer_matrix(20, 20)
    FPLLL.set_random_seed(1337)
    A1 = make_integer_matrix(20, 20)
    for i in range(20):
        for j in range(20):
            assert A0[i, j] == A1[i, j]
def test_bkz_call(block_size=10):
    params = fplll_bkz.Param(block_size=block_size, flags=fplll_bkz.VERBOSE | fplll_bkz.GH_BND)
    for cls in (BKZ, BKZ2):
        for n in dimensions:
            FPLLL.set_random_seed(n)
            A = make_integer_matrix(n)
            B = copy(A)
            cls(B)(params=params)
def compare_bkz(classes, matrixf, dimensions, block_sizes, progressive_step_size,
                seed, threads=2, samples=2, tours=1, pickle_jar=None, logger="compare"):
    """
    Compare BKZ-style lattice reduction.

    :param classes: a list of BKZ classes to test.  See caveat above.
    :param matrixf: A function to create matrices for a given dimension and block size
    :param dimensions: a list of dimensions to test
    :param block_sizes: a list of block sizes to test
    :param progressive_step_size: step size for the progressive strategy; ``None`` to disable it
    :param seed: A random seed, each matrix will be created with seed increased by one
    :param threads: number of threads to use
    :param samples: number of reductions to perform
    :param tours: number of BKZ tours to run
    :param pickle_jar: passed to the ``Conductor`` for pickling intermediate results
    :param logger: name of the logger to use
    """
    jobs = []

    for dimension in dimensions:
        jobs.append((dimension, []))

        for block_size in block_sizes:
            if dimension < block_size:
                continue

            seed_ = seed
            jobs_ = []
            matrixf_ = matrixf(dimension=dimension, block_size=block_size)

            for i in range(samples):
                FPLLL.set_random_seed(seed_)
                A = IntegerMatrix.random(dimension, **matrixf_)

                for BKZ_ in classes:
                    args = (BKZ_, A, block_size, tours, progressive_step_size)
                    jobs_.append(((BKZ_.__name__, seed_), args))

                seed_ += 1

            jobs[-1][1].append((block_size, jobs_))

    conductor = Conductor(threads=threads, pickle_jar=pickle_jar, logger=logger)
    return conductor(jobs)
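
# Usage sketch (not part of the original source): ``matrixf`` is expected to
# return keyword arguments for ``IntegerMatrix.random``; the q-ary parameters
# below are illustrative and ignore ``block_size``.
def qary30(dimension, block_size):
    return {"algorithm": "qary", "bits": 30, "k": dimension // 2, "int_type": "long"}

# traces = compare_bkz([BKZ2], qary30, dimensions=(60, 80), block_sizes=(20, 30),
#                      progressive_step_size=None, seed=1)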
def __call__(cls, M, predicate, block_size, invalidate_cache=lambda: None,
             max_loops=8, threads=1, **kwds):
    bkz = BKZ2(M)
    if block_size > STRATEGIES_MAX_DIM:
        warnings.warn("reducing block size to {max}".format(max=STRATEGIES_MAX_DIM))
        block_size = STRATEGIES_MAX_DIM

    FPLLL.set_threads(threads)
    params = BKZ.EasyParam(block_size=block_size, **kwds)
    auto_abort = BKZ.AutoAbort(M, M.d)
    tracer = BKZTreeTracer(bkz, root_label="bkz_enum", start_clocks=True)
    found, ntests, solution = False, 0, None
    for tour in range(max_loops):
        bkz.tour(params)

        if auto_abort.test_abort():
            break

        invalidate_cache()

        with tracer.context("check"):
            for i, v in enumerate(bkz.M.B):
                ntests += 1
                if predicate(v, standard_basis=True):
                    found = True
                    solution = tuple([int(v_) for v_ in v])
                    break

        if found:
            break

    FPLLL.set_threads(1)

    tracer.exit()
    b0, b0e = bkz.M.get_r_exp(0, 0)

    return USVPPredSolverResults(
        success=found,
        solution=solution,
        ntests=ntests,
        b0=b0 ** (0.5) * 2 ** (b0e / 2.0),
        cputime=tracer.trace.data["cputime"],
        walltime=tracer.trace.data["walltime"],
        data=tracer.trace,
    )
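
# Usage sketch (not part of the original source): ``predicate`` is called on
# rows of the reduced basis (``standard_basis=True`` here) and returns ``True``
# once a satisfactory vector is found; the squared-norm bound is a placeholder.
def make_norm_predicate(squared_bound):
    def predicate(v, standard_basis=True):
        return sum(int(v_) ** 2 for v_ in v) <= squared_bound
    return predicate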
def test_simple_bkz_reduction(block_size=10):
    for n in dimensions:
        FPLLL.set_random_seed(n)
        A = make_integer_matrix(n)
        LLL.reduction(A)

        B = copy(A)
        BKZ.reduction(B, BKZ.Param(block_size=block_size))

        C = copy(A)
        SimpleBKZ(C)(block_size=block_size)

        assert abs(C[0].norm() - B[0].norm()) < 0.1
        assert C[0].norm() < A[0].norm()
def compute_kernel(args):
    if args.seed is not None:
        set_random_seed(args.seed)
        FPLLL.set_random_seed(args.seed)

    ecdsa = ECDSA(nbits=args.nlen)

    lines, k_list, _ = ecdsa.sample(m=args.m, klen_list=args.klen_list, seed=args.seed, errors=args.e)
    w_list = [2 ** (klen - 1) for klen in args.klen_list]
    f_list = [Integer(max(w_list) / wi) for wi in w_list]

    targetvector = vector([(k - w) * f for k, w, f in zip(k_list, w_list, f_list)] + [max(w_list)])

    try:
        solver = ECDSASolver(ecdsa, lines, m=args.m, d=args.d, threads=args.threads)
    except KeyError:
        raise ValueError("Algorithm {alg} unknown".format(alg=args.alg))

    expected_length = solver.evf(args.m, max(args.klen_list), prec=args.nlen // 2)
    gh = solver.ghf(args.m, ecdsa.n, args.klen_list, prec=args.nlen // 2)

    params = args.params if args.params else {}
    key, res = solver(solver=args.algorithm, flavor=args.flavor, **params)

    RR = RealField(args.nlen // 2)

    logging.info(
        (
            "try: {i:3d}, tag: 0x{tag:016x}, success: {success:1d}, "
            "|v|: 2^{v:.2f}, |b[0]|: 2^{b0:.2f}, "
            "|v|/|b[0]|: {b0r:.3f}, "
            "E|v|/|b[0]|: {eb0r:.3f}, "
            "|v|/E|b[0]|: {b0er:.3f}, "
            "cpu: {cpu:10.1f}s, "
            "wall: {wall:10.1f}s, "
            "work: {total:d}"
        ).format(
            i=args.i,
            tag=args.tag,
            success=int(res.success),
            v=float(log(RR(targetvector.norm()), 2)),
            b0=float(log(RR(res.b0), 2)),
            b0r=float(RR(targetvector.norm()) / RR(res.b0)),
            eb0r=float(RR(expected_length) / RR(res.b0)),
            b0er=float(RR(targetvector.norm()) / gh),
            cpu=float(res.cputime),
            wall=float(res.walltime),
            total=res.ntests,
        )
    )

    return key, res, float(targetvector.norm())
def gso_workerf(args):
    import copy

    d, q, seed, params, procrastinating, what = args
    dummy = [1.0] * d

    if procrastinating:
        from impl import BKZReduction
        from simu import ProcrastinatingBKZSimulation as BKZSimulation
        from simu import ProcrastinatingBKZQualitySimulation as BKZQualitySimulation
    else:
        from fpylll.algorithms.bkz2 import BKZReduction
        from simu import BKZSimulation
        from simu import BKZQualitySimulation

    FPLLL.set_random_seed(seed)
    A = LLL.reduction(IntegerMatrix.random(d, "qary", k=d // 2, q=q))

    if "qs" in what:
        qsimu_r = BKZQualitySimulation(copy.copy(A))(params)
    else:
        qsimu_r = dummy

    if "fs" in what:
        fsimu_r = BKZSimulation(copy.copy(A))(params)
    else:
        fsimu_r = dummy

    if "r" in what:
        BKZReduction(A)(params)
        M = GSO.Mat(A)
        M.update_gso()
        real_r = M.r()
    else:
        real_r = dummy

    return qsimu_r, fsimu_r, real_r
def simulate(r, param, prng_seed=0xdeadbeef):
    """
    Wraps original BSW18 simulator for ease of comparison.
    """
    if not prng_seed:
        prng_seed = FPLLL.randint(0, 2**32 - 1)
    random.seed(prng_seed)

    if isinstance(r, IntegerMatrix):
        r = GSO.Mat(r)
    if isinstance(r, MatGSO):
        r.update_gso()
        r = r.r()

    n = len(r)
    # code uses ln of squared norms, FPLLL uses squared norms
    l = list(map(log, r))

    if param.max_loops:
        N = param.max_loops
    else:
        N = n

    # using original.rk_ln
    # l1 = probabilistic_bkz_simulator.bkz_simulation_stochastic(
    #     l, param.block_size, N, original.rk_ln)

    # using rk from this file -- more direct comparison, essentially the same
    log2_e = log(exp(1), 2)
    l1 = probabilistic_bkz_simulator.bkz_simulation_stochastic(
        l, param.block_size, N, list(map(lambda x: x / log2_e, rk)))

    l1 = list(map(exp, l1))
    return l1, N
def pruning(self, query):
    block_size = self.block_size

    pruning = []
    R = []
    preproc_time = []
    probability = []
    for i, data in enumerate(query):
        if data is None:
            continue
        rs, r, preproc_t, probability_ = data
        gh_radius = gaussian_heuristic(rs)
        R.append([x / gh_radius for x in rs])
        preproc_time.append(preproc_t)
        probability.append(probability_)

    threads = FPLLL.get_threads()
    preproc_time = sum(threads * preproc_time) / len(preproc_time)
    overhead = nodes_per_sec(block_size) * preproc_time
    probability = sum(probability) / len(probability)

    for i in range(-PruningStrategizer.GH_FACTORS_STEPS, PruningStrategizer.GH_FACTORS_STEPS + 1):
        radius = gh_margin(block_size) ** (1.0 * i / PruningStrategizer.GH_FACTORS_STEPS)
        try:
            pruning_ = Pruning.run(radius, overhead, R, min(1.05 * probability, 0.999),
                                   flags=Pruning.GRADIENT)
            pruning.append(pruning_)
        except RuntimeError as exception:
            # HACK: this really shouldn't happen
            print(block_size, radius, exception)

    return tuple(pruning)
def test_precision():
    FPLLL.set_precision(53)
    assert FPLLL.get_precision() == 53
    assert FPLLL.set_precision(100) == 53
    assert FPLLL.set_precision(100) == 100
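
# Usage sketch (not part of the original source): as the test above shows,
# ``FPLLL.set_precision`` returns the previous precision, so it can bracket
# high-precision work; the value 212 is arbitrary.
old_prec = FPLLL.set_precision(212)
# ... MPFR-based GSO computations would go here ...
FPLLL.set_precision(old_prec)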
def approx_svp_time(seed, params, return_queue=None, progressive=False):
    """Run Approx-SVP_{1.05} reduction on an SVP challenge basis using ``params``.

    :param seed: random seed for matrix creation
    :param params: BKZ preprocessing parameters, preprocessing block size is ignored
    :param return_queue: if not ``None``, the result is put on this queue.
    :param progressive: run Progressive-BKZ
    """
    from chal import load_svp_challenge
    from fpylll.algorithms.bkz import BKZReduction as BKZBase

    FPLLL.set_random_seed(seed)
    A = load_svp_challenge(params.block_size, seed=seed)
    M = GSO.Mat(A)
    M.update_gso()

    gh = gaussian_heuristic(M.r())
    target_norm = 1.05 ** 2 * gh

    nodes_per_second = 2.0 * 10**9 / 100.0

    self = BKZ2(M)
    tracer = BKZTreeTracer(self, start_clocks=True)

    rerandomize = False
    preproc_cost = None
    with tracer.context(("tour", 0)):
        while M.get_r(0, 0) > target_norm:
            with tracer.context("preprocessing"):
                if rerandomize:
                    self.randomize_block(1, params.block_size,
                                         density=params.rerandomization_density, tracer=tracer)
                with tracer.context("reduction"):
                    BKZBase.svp_preprocessing(self, 0, params.block_size, params, tracer)  # LLL
                    preproc = round(0.9878 * params.block_size - 24.12)  # curve fitted to chal.py output
                    prepar = params.__class__(block_size=preproc, strategies=params.strategies,
                                              flags=BKZ.GH_BND)
                    self.tour(prepar, 0, params.block_size, tracer=tracer)

            if preproc_cost is None:
                preproc_cost = float(tracer.trace.find("preprocessing")["walltime"])
                preproc_cost *= nodes_per_second

            with tracer.context("pruner"):
                step_target = M.get_r(0, 0) * 0.99 if progressive else target_norm
                pruner = Pruning.Pruner(step_target, preproc_cost, [M.r()], target=1,
                                        metric=Pruning.EXPECTED_SOLUTIONS)
                coefficients = pruner.optimize_coefficients([1.] * M.d)

            try:
                enum_obj = Enumeration(self.M)
                with tracer.context("enumeration", enum_obj=enum_obj, full=True):
                    max_dist, solution = enum_obj.enumerate(
                        0, params.block_size, target_norm, 0, pruning=coefficients)[0]
                with tracer.context("postprocessing"):
                    self.svp_postprocessing(0, params.block_size, solution, tracer=tracer)
                rerandomize = False
            except EnumerationError:
                rerandomize = True

            self.M.update_gso()
            logger.debug("r_0: %7.2f, target: %7.2f, preproc: %3d" % (
                log(M.get_r(0, 0), 2), log(target_norm, 2), preproc))

    tracer.exit()
    tracer.trace.data["|A_0|"] = A[0].norm()
    tracer.trace.data["preprocessing_block_size"] = preproc

    if return_queue:
        return_queue.put(tracer.trace)
    else:
        return tracer.trace
def simulate_prob(r, param, prng_seed=0xdeadbeef):
    """
    BKZ simulation algorithm as proposed by Bai, Stehlé and Wen in "Measuring, simulating and
    exploiting the head concavity phenomenon in BKZ".  Returns the reduced squared norms of the
    GSO vectors of the basis and the number of BKZ tours simulated.  This version terminates
    when no substantial progress is made anymore or at most ``max_loops`` tours were simulated.
    If no ``max_loops`` is given, at most ``d`` tours are performed, where ``d`` is the
    dimension of the lattice.

    :param r: squared norms of the GSO vectors of the basis.
    :param param: BKZ parameters

    EXAMPLE:

        >>> from fpylll import IntegerMatrix, GSO, LLL, FPLLL, BKZ
        >>> FPLLL.set_random_seed(1337)
        >>> A = LLL.reduction(IntegerMatrix.random(100, "qary", bits=30, k=50))
        >>> M = GSO.Mat(A)

        >>> from fpylll.tools.bkz_simulator import simulate_prob
        >>> _ = simulate_prob(M, BKZ.Param(block_size=40, max_loops=4, flags=BKZ.VERBOSE))
        {"i": 0, "r_0": 2^33.1, "r_0/gh": 5.193166, "rhf": 1.017512, "/": -0.07022, "hv/hv": 2.428125}
        {"i": 1, "r_0": 2^32.7, "r_0/gh": 3.997766, "rhf": 1.016182, "/": -0.06214, "hv/hv": 2.168460}
        {"i": 2, "r_0": 2^32.3, "r_0/gh": 3.020156, "rhf": 1.014759, "/": -0.05808, "hv/hv": 2.059562}
        {"i": 3, "r_0": 2^32.2, "r_0/gh": 2.783102, "rhf": 1.014344, "/": -0.05603, "hv/hv": 2.013191}

    """
    if param.block_size <= 2:
        raise ValueError("The BSW18 simulator requires block size >= 3.")

    # fix PRNG seed
    random.seed(prng_seed if prng_seed else FPLLL.randint(0, 2**32 - 1))

    r = _extract_log_norms(r)

    d = len(r)

    r1 = copy(r)
    r2 = copy(r)
    c = [rk[-j] - sum(rk[-j:]) / j for j in range(1, 46)]
    c += [(lgamma(beta / 2.0 + 1) * (1.0 / beta) - log(sqrt(pi))) / log(2.0)
          for beta in range(46, param.block_size + 1)]

    if param.max_loops:
        N = param.max_loops
    else:
        N = d

    t0 = [True for _ in range(d)]
    for i in range(N):
        t1 = [False for _ in range(d)]
        for k in range(d - min(45, param.block_size)):
            beta = min(param.block_size, d - k)
            f = k + beta
            phi = False
            for kp in range(k, f):
                phi |= t0[kp]
            logV = sum(r1[:f]) - sum(r2[:k])
            if phi:
                X = random.expovariate(.5)
                lma = (log(X, 2) + logV) / beta + c[beta - 1]
                if lma < r1[k]:
                    r2[k] = lma
                    r2[k + 1] = r1[k] + log(sqrt(1 - 1. / beta), 2)
                    dec = (r1[k] - lma) + (r1[k + 1] - r2[k + 1])
                    for j in range(k + 2, f):
                        r2[j] = r1[j] + dec / (beta - 2.)
                        t1[j] = True
                    phi = False

            for j in range(k, f):
                r1[j] = r2[j]

        # early termination
        if True not in t1:
            break

        # last block
        beta = min(45, param.block_size)
        logV = sum(r1) - sum(r2[:-beta])
        if param.block_size < 45:
            rk1 = normalize_GSO_unitary(rk[-beta:])
        else:
            rk1 = rk
        K = range(d - beta, d)
        for k, r in zip(K, rk1):
            r2[k] = logV / beta + r
            t1[k] = True

        # early termination
        if r1 == r2:
            break
        r1 = copy(r2)
        t0 = copy(t1)

        if param.flags & BKZ.VERBOSE:
            r = OrderedDict()
            r["i"] = i
            for k, v in basis_quality(list(map(lambda x: 2.0 ** (2 * x), r1))).items():
                r[k] = v
            print(pretty_dict(r))

    r1 = list(map(lambda x: 2.0 ** (2 * x), r1))

    return r1, i + 1
def call(max_block_size: "compute up to this block size",
         strategies: "BKZ strategies",
         dump_filename: """results are stored in this filename, if ``None`` then
                           ``data/fplll-estimates-{lattice_type}.sobj`` is used.""" = None,
         npexp: "number of experiments to run in parallel" = 4,
         ncores: "number of cores to use per experiment" = 1,
         algorithm: "one of SVP, oSVP or HSVP1.05" = "SVP",
         progressive: "use Progressive-BKZ in Approx-SVP" = False,
         lower_bound: "Start experiment in this dimension" = None,
         step_size: "Increment dimension by this much each iteration" = 2,
         c: "Overshooting parameter (for oSVP)" = 0.25,
         samples=48):
    """
    Run (Approx-)SVP reduction and record statistics.
    """
    results = OrderedDict()
    FPLLL.set_threads(ncores)

    if dump_filename is None:
        # HSVP1.05 runs on SVP challenge instances, the other algorithms on q-ary lattices
        dump_filename = "../data/fplll-observations,{lattice_type},[{strategies}].sobj".format(
            strategies=os.path.basename(strategies),
            lattice_type="svp-challenge" if algorithm == "HSVP1.05" else "qary")

    if isinstance(strategies, str):
        if strategies.endswith(".json"):
            strategies = load_strategies_json(bytes(strategies, "ascii"))
        elif strategies.endswith(".sobj"):
            strategies = pickle.load(open(strategies, "rb"))

    if algorithm.lower() == "svp":
        target = svp_time
        lower_bound = lower_bound if lower_bound else 20
    elif algorithm.lower() == "hsvp1.05":
        target = approx_svp_time
        lower_bound = lower_bound if lower_bound else 60
    elif algorithm.lower() == "osvp":
        target = osvp_time
        lower_bound = lower_bound if lower_bound else 20
    else:
        raise ValueError("Algorithm '%s' not known." % algorithm)

    for block_size in range(lower_bound, max_block_size + 1, step_size):
        return_queue = Queue()
        result = OrderedDict([("total time", None)])

        traces = []

        # 2. run `k` processes in parallel
        for chunk in chunk_iterator(range(samples), npexp):
            processes = []
            for i in chunk:
                seed = i
                param = BKZ.Param(block_size=block_size, strategies=list(strategies),
                                  flags=BKZ.VERBOSE | BKZ.GH_BND)
                param["c"] = c
                if npexp > 1:
                    process = Process(target=target, args=(seed, param, return_queue))
                    processes.append(process)
                    process.start()
                else:
                    traces.append(target(seed, param, None))

            active_children()

            if npexp > 1:
                for process in processes:
                    traces.append(return_queue.get())

        preprocessing_block_size = sum(
            [trace.data["preprocessing_block_size"] for trace in traces]) / samples
        total_time = sum([float(trace.data["walltime"]) for trace in traces]) / samples
        length = sum([trace.data["|A_0|"] for trace in traces]) / samples
        enum_nodes = sum([
            sum([float(enum["#enum"]) for enum in trace.find_all("enumeration")])
            for trace in traces
        ]) / samples

        logger.info(
            "= block size: %3d, m: %3d, t: %10.3fs, log(#enum): %6.1f |A_0| = 2^%.1f",
            block_size, samples, total_time, log(enum_nodes, 2), log(length, 2))

        result["total time"] = total_time
        result["betaprime"] = preprocessing_block_size
        result["length"] = length
        result["#enum"] = enum_nodes
        result["traces"] = traces
        results[block_size] = result

        if results[block_size]["total time"] > 1.0 and samples > max(8, npexp):
            samples //= 2
            if samples < npexp:
                samples = npexp

        pickle.dump(results, open(dump_filename, "wb"))

    return results
import os
import math
import random

from fpylll import FPLLL

from ..util import matrix_overview, str_mat
from pylattice.algorithms.lll import run_LLL
from pylattice.algorithms.bkz import run_BKZ, run_BKZ2, run_DBKZ
from pylattice.algorithms.sieve_asvp import solve_asvp
from pylattice.algorithms.sieve_svp import solve_svp


SEED = int.from_bytes(os.urandom(8), 'big')
random.seed(SEED)
FPLLL.set_random_seed(SEED)
FPLLL.set_precision(0)


class SSP:
    """ subset sum problem
    """
    _epsilon = 0.1
    _delta = 0.99
    _eta = 0.501

    def __init__(self, numss=None, sums=None, key=None, verbose=0):
        """
        :param numss:
        :param sums:
from sage.all import log, exp
from sage.all import line, save, load, identity_matrix, matrix
from fpylll import IntegerMatrix, GSO, LLL, FPLLL, BKZ
from fpylll.tools.bkz_simulator import simulate as CN11_simulate
import BSW18
import og

# n_halfs, block_size, max_loops = 50, 45, 2000
n_halfs, block_size, max_loops = 90, 170, 60
# n_halfs, block_size, max_loops = 75, 60, 50
# n_halfs, block_size, max_loops = 75, 60, 20000

# generate lattice instance
FPLLL.set_random_seed(1337)
q = 2**30
mat = IntegerMatrix.random(2 * n_halfs, "qary", q=q, k=n_halfs)
A = LLL.reduction(mat)
M = GSO.Mat(A)
M.update_gso()

cn11 = CN11_simulate(M, BKZ.Param(block_size=block_size, max_loops=max_loops))
bsw18 = BSW18.simulate(M, BKZ.Param(block_size=block_size, max_loops=max_loops))
og = og.simulate(M, BKZ.Param(block_size=block_size, max_loops=max_loops))

g = line([(i, log(cn11[0][i]) / 2 - log(q) / 2) for i in range(len(cn11[0]))]) \
    + line([(i, log(bsw18[0][i]) / 2 - log(q) / 2) for i in range(len(bsw18[0]))], color='red', thickness=2) \
    + line([(i, log(og[0][i]) / 2 - log(q) / 2) for i in range(len(og[0]))], color='green')
save(g, "test.png", dpi=150)

log_vol_cn11 = sum(map(lambda x: log(x) / 2, cn11[0]))
log_vol_bsw18 = sum(map(lambda x: log(x) / 2, bsw18[0]))
#print(gpoly(1,0))
indrow = 0
A = IntegerMatrix(w, w)
for v in range(m + 1):
    for u in range(e):
        coeff_vector = gpoly(u, v)
        #print(v,u,coeff_vector)
        for icol in range(len(coeff_vector)):
            A[indrow, icol] = int(coeff_vector[len(coeff_vector) - icol - 1])  # .item()
        indrow += 1

print(A)

FPLLL.set_precision(120)
M = GSO.Mat(A, float_type="mpfr")
M.update_gso()
print('before LLL:', A[0].norm(), M.get_r(0, 0))
L = LLL.Reduction(M, delta=0.9999, eta=0.5001)
L()
print('after LLL:', A[0].norm(), M.get_r(0, 0))
print(A[0])

A0rev = list(A[0])[::-1]
hpoly = np.poly1d(A0rev)
print('hpoly:', hpoly)
print('eval at 2 = ', np.polyval(hpoly, 2) % (N**3))  # ``N ^ 3`` would be XOR in plain Python
print(hpoly.r)
def __call__(cls, M, predicate, squared_target_norm, invalidate_cache=lambda: None,
             target_prob=None, preproc_offset=20, ph=0, threads=1, **kwds):
    preproc_time = None
    ntests = 0

    if target_prob is None:
        target_prob = cls.DEFAULT_TARGET_PROB

    bkz_res = usvp_pred_bkz_enum_solve(M, predicate,
                                       block_size=min(STRATEGIES_MAX_DIM, M.d),
                                       invalidate_cache=invalidate_cache,
                                       threads=threads)

    if bkz_res.success:  # this might be enough
        return bkz_res

    FPLLL.set_threads(threads)

    M.update_gso()
    bkz = BKZ2(M)

    tracer = BKZTreeTracer(bkz, root_label="enum_pred", start_clocks=True)

    remaining_probability, rerandomize, found, solution = (1.0, False, False, None)

    while remaining_probability > 1.0 - target_prob:
        invalidate_cache()

        with tracer.context("preprocessing"):
            if rerandomize:
                with tracer.context("randomization"):
                    bkz.randomize_block(0, M.d, tracer=tracer, density=3)
            with tracer.context("reduction"):
                with tracer.context("lll"):
                    bkz.lll_obj()
                for _ in range(4):
                    bkz.tour(
                        BKZ.EasyParam(min(max(M.d - preproc_offset, 2), STRATEGIES_MAX_DIM),
                                      flags=BKZ.GH_BND),
                        tracer=tracer,
                    )

        if preproc_time is None:
            preproc_time = float(tracer.trace.child("preprocessing")["cputime"])

        with tracer.context("check"):
            for v in M.B:
                ntests += 1
                if predicate(v, standard_basis=True):
                    found = True
                    solution = tuple([int(v_) for v_ in v])
                    break

        if found:
            break

        with tracer.context("pruner"):
            preproc_cost = threads * preproc_time * 2 * 10**9 / 100  # 100 cycles per node
            with SuppressStream():
                r = []
                for i in range(M.d):
                    r_, exp = M.get_r_exp(i, i)
                    r.append(r_ * 2 ** (exp - ph))
                (cost, prob), coeffs = cls.pruning_coefficients(
                    squared_target_norm / 2 ** ph, r, preproc_cost, target_prob=target_prob)

        def callbackf(v):
            nonlocal ntests
            ntests += 1
            return predicate(v, standard_basis=False)

        enum_obj = Enumeration(M, callbackf=callbackf)
        with tracer.context("enumeration", enum_obj=enum_obj, probability=prob, full=True):
            try:
                solutions = enum_obj.enumerate(0, M.d, squared_target_norm / 2 ** ph, ph,
                                               pruning=coeffs)
                _, v = solutions[0]
                found = True
                solution = tuple([int(v_) for v_ in M.B.multiply_left(v)])
                break
            except EnumerationError:
                pass

        rerandomize = True

        remaining_probability *= 1 - prob

    tracer.exit()
    FPLLL.set_threads(1)

    b0, b0e = bkz.M.get_r_exp(0, 0)

    return USVPPredSolverResults(
        success=found,
        solution=solution,
        ntests=ntests + bkz_res.ntests,
        b0=b0 ** (0.5) * 2 ** (b0e / 2.0),
        cputime=tracer.trace.data["cputime"] + bkz_res.cputime,
        walltime=tracer.trace.data["walltime"] + bkz_res.walltime,
        data=tracer.trace,
    )
def test_simple_bkz_init():
    for n in dimensions:
        FPLLL.set_random_seed(2**10 + n)
        A = make_integer_matrix(n)
        B = SimpleBKZ(copy(A))
        del B
def test_simple_bkz_call(block_size=10):
    for cls in (SimpleBKZ, SimpleDualBKZ):
        for n in dimensions:
            FPLLL.set_random_seed(n)
            A = make_integer_matrix(n)
            cls(A)(block_size=block_size)