def test_linear_pruning():
    """Smoke-test BKZ reduction using linear-pruning strategies up to block size 10."""
    basis = IntegerMatrix.random(25, "qary", k=15, q=127)
    top_block_size = 10
    preprocessing = 3
    # trivial strategies for the smallest block sizes …
    strategies = [Strategy(i) for i in range(5)]
    # … and linear pruning with one preprocessing block size for the rest
    strategies += [
        Strategy(b, [preprocessing], [Pruning.LinearPruning(b, 2)])
        for b in range(5, top_block_size + 1)
    ]
    BKZ.reduction(basis, BKZ.Param(block_size=top_block_size, strategies=strategies))
def _prepare_parameters(dump_filename, strategies_and_costs, lower_limit, lattice_type, greedy=False):
    """
    Normalise the inputs of a simulation run.

    :param dump_filename: output filename; if ``None`` a default derived from
        ``lattice_type`` (and ``greedy``) is used
    :param strategies_and_costs: either a ``(strategies, costs)`` pair, a filename
        of a pickled such pair, or ``None`` to start from scratch
    :param lower_limit: truncate strategies/costs to this length; if ``None`` it
        is set to the number of known strategies
    :param lattice_type: lattice type tag used in the default filename
    :param greedy: tag the default filename with ``,g``
    :returns: ``(dump_filename, strategies, costs, lower_limit)``
    """
    if dump_filename is None:
        dump_filename = "../data/fplll-simulations,{lattice_type}{g}.sobj".format(
            lattice_type=lattice_type, g=",g" if greedy else ""
        )

    if strategies_and_costs is not None:
        try:
            strategies, costs = strategies_and_costs
        except ValueError:
            # not a pair — treat it as a filename of a pickled (strategies, costs)
            # fix: use a context manager so the file handle is closed
            with open(strategies_and_costs, "rb") as fh:
                strategies, costs = pickle.load(fh)
    else:
        # seed with trivial strategies for dimensions 0, 1, 2
        costs, strategies = [], []
        for i in range(3):
            strategies.append(Strategy(i, [], []))
            costs.append({"total cost": 0.0})

    if lower_limit is None:
        lower_limit = len(strategies)
    else:
        strategies = strategies[:lower_limit]
        costs = costs[:lower_limit]

    return dump_filename, strategies, costs, lower_limit
def _prepare_parameters(
    dump_filename,
    c,
    strategies_and_costs,
    lower_limit,
    lattice_type,
    preproc_loops,
    greedy=False,
    sd=False,
    ignore_preproc_cost=False,
):
    """
    Normalise the inputs of a block-simulation run.

    :param dump_filename: output filename; if ``None`` a default derived from the
        other parameters is used
    :param c: overshoot parameter, encoded into the default filename as ``{c:.2f}``
    :param strategies_and_costs: either a ``(strategies, costs)`` pair, a filename
        of a pickled such pair, or ``None`` to start from scratch
    :param lower_limit: truncate strategies/costs to this length; if ``None`` it
        is set to the number of known strategies
    :param lattice_type: lattice type tag used in the default filename
    :param preproc_loops: number of preprocessing loops (filename tag only here)
    :param greedy: tag the default filename with ``,g``
    :param sd: tag the default filename with ``,sd``
    :param ignore_preproc_cost: tag the default filename with ``,lb``
    :returns: ``(dump_filename, strategies, costs, lower_limit)``
    """
    if dump_filename is None:
        dump_filename = "../data/fplll-block-simulations,{lattice_type},{c:.2f},{preproc_loops:d}{g}{sd}{lb}.sobj".format(
            lattice_type=lattice_type,
            c=c,
            preproc_loops=preproc_loops,
            g=",g" if greedy else "",
            sd=",sd" if sd else "",
            lb=",lb" if ignore_preproc_cost else "",
        )

    if strategies_and_costs is not None:
        try:
            strategies, costs = strategies_and_costs
        except ValueError:
            # not a pair — treat it as a filename of a pickled (strategies, costs)
            # fix: use a context manager so the file handle is closed
            with open(strategies_and_costs, "rb") as fh:
                strategies, costs = pickle.load(fh)
    else:
        # seed with trivial strategies for dimensions 0, 1, 2
        costs, strategies = [], []
        for i in range(3):
            strategies.append(Strategy(i, [], []))
            costs.append({"total cost": 0.0})

    if lower_limit is None:
        lower_limit = len(strategies)
    else:
        strategies = strategies[:lower_limit]
        costs = costs[:lower_limit]

    return dump_filename, strategies, costs, lower_limit
def enumeration_cost(
    r,
    d,
    c,
    preproc,
    strategies,
    costs,
    gh_factor=1.10,
    float_type="d",
    greedy=False,
    sd=False,
    radius_bound=1,
    preproc_loops=None,
    ignore_preproc_cost=False,
):
    """
    Cost of enumeration on `r` using ``strategies``.

    :param r: squared Gram-Schmidt vectors
    :param d: enumeration dimension
    :param c: overshoot parameter
    :param preproc: preprocessing dimension (``None`` or ``2`` means LLL only)
    :param strategies: prepcomputed strategies
    :param costs: precomputed costs for smaller dimensions
    :param gh_factor: target GH_FACTOR * GH
    :param float_type: float type to use in pruner
    :param greedy: use Greedy pruning strategy.
    :param sd: use self-dual strategy
    :param radius_bound: compute pruning parameters for `GH^(i/radius_bound)` for
        `i in -radius_bound, …, radius_bound`
    :param preproc_loops: number of loops to perform preprocessing for
    :param ignore_preproc_cost: assume all preprocessing has the cost of LLL
        regardless of block size.
    :returns: ``(cost, strategy)`` where ``cost`` is a dict of cost components.
    """
    from cost import lll_cost, pruning_coefficients

    # work on the overshot block of dimension D = (1+c)*d
    D = int((1 + c) * d)

    if preproc is None or preproc == 2:
        # no real preprocessing: LLL only
        preproc_cost = lll_cost(D)
        r_ = list(r)
    else:
        if sd:
            f = bkz_simulatef(
                SDProcrastinatingBKZQualitySimulation,
                init_kwds={"preprocessing_levels": 1, "preprocessing_cutoff": 45},
                call_kwds={"c": c},
            )
            r_, preproc_cost = preprocess(
                r[:D], c, preproc, strategies, costs, max_loops=preproc_loops, bkz_simulate=f
            )
            # each SD tour costs as much as one BKZ tour
            preproc_cost = 2 * preproc_cost
        else:
            f = bkz_simulatef(
                ProcrastinatingBKZQualitySimulation,
                init_kwds={"preprocessing_levels": 1, "preprocessing_cutoff": 45},
                call_kwds={"c": c},
            )
            r_, preproc_cost = preprocess(
                r[:D], c, preproc, strategies, costs, max_loops=preproc_loops, bkz_simulate=f
            )
    if ignore_preproc_cost:
        preproc_cost = lll_cost(D)

    gh = gaussian_heuristic(r_[:d])
    target_norm = gh_factor * gh

    pc = pruning_coefficients(r_[:d], preproc_cost, radius_bound=radius_bound, float_type=float_type, greedy=greedy)

    # bug fix: guard against ``preproc is None`` — the bare ``preproc > 2``
    # comparison raises TypeError on Python 3 when preproc is None
    strategy = Strategy(
        d,
        preprocessing_block_sizes=[preproc] * preproc_loops if preproc is not None and preproc > 2 else [],
        pruning_parameters=pc,
    )
    pr = strategy.get_pruning(target_norm, gh)

    pruner = Pruning.Pruner(
        target_norm,
        preproc_cost,
        [r_[:d]],
        target=0.51,
        float_type=float_type,
        flags=Pruning.HALF if greedy else Pruning.GRADIENT | Pruning.HALF,
    )
    cost = {
        "total cost": preproc_cost + pruner.repeated_enum_cost(pr.coefficients),
        "single enum": pruner.single_enum_cost(pr.coefficients),
        "preprocessing": preproc_cost,
        "c": c,
        "probability": pruner.measure_metric(pr.coefficients),
    }
    logging.debug(
        "%3d :: C: %5.1f, P: %5.1f c: %.2f, %s"
        % (d, log(cost["total cost"], 2), log(cost["preprocessing"], 2), cost["c"], strategy)
    )
    return cost, strategy
def enumeration_cost(
    r,
    preproc,
    strategies,
    costs,
    gh_factor=1.00,
    preproc_loops=1,
    target_success_probability=0.51,
    float_type="d",
    greedy=False,
):
    """
    Cost of enumeration on `r` using ``strategies``.

    :param r: squared Gram-Schmidt vectors
    :param preproc: preprocessing dimension (``None`` means LLL only)
    :param strategies: prepcomputed strategies
    :param costs: precomputed costs for smaller dimensions
    :param gh_factor: target GH_FACTOR * GH
    :param preproc_loops: number of preprocessing loops
    :param target_success_probability: target of the pruner
    :param float_type: float type to use in pruner
    :param greedy: use Greedy pruning strategy.
    :returns: ``(cost, strategy)`` where ``cost`` is a dict of cost components.
    """
    d = len(r)
    if preproc is None:
        # no preprocessing beyond LLL
        preproc_cost = lll_cost(d)
    else:
        r, preproc_cost = preprocess(r, preproc, strategies, costs, max_loops=preproc_loops)

    gh = gaussian_heuristic(r)
    # never aim beyond the current first basis vector
    target_norm = min(gh_factor * gh, r[0])

    pc = pruning_coefficients(r, preproc_cost, float_type=float_type, greedy=greedy)

    # bug fix: guard against ``preproc is None`` — the bare ``preproc > 2``
    # comparison raises TypeError on Python 3 when preproc is None
    strategy = Strategy(
        d,
        preprocessing_block_sizes=[preproc] * preproc_loops if preproc is not None and preproc > 2 else [],
        pruning_parameters=pc,
    )
    pr = strategy.get_pruning(target_norm, gh)

    pruner = Pruning.Pruner(
        target_norm,
        preproc_cost,
        [r],
        target=target_success_probability,
        float_type=float_type,
        flags=Pruning.HALF if greedy else Pruning.GRADIENT | Pruning.HALF,
    )
    cost = {
        "total cost": preproc_cost + pruner.repeated_enum_cost(pr.coefficients),
        "single enum": pruner.single_enum_cost(pr.coefficients),
        "preprocessing": preproc_cost,
        "probability": pruner.measure_metric(pr.coefficients),
    }
    return cost, strategy
def strategize(max_block_size,
               existing_strategies=None,
               min_block_size=3,
               nthreads=1, nsamples=50,
               pruner_method="hybrid",
               StrategizerFactory=ProgressivePreprocStrategizerFactory,
               dump_filename=None):
    """
    Compute strategies (*one* preprocessing block size + pruning) for block sizes
    ``min_block_size`` … ``max_block_size``.

    :param max_block_size: maximum block size to consider
    :param existing_strategies: extend these previously computed strategies
    :param min_block_size: start at this block size
    :param nthreads: use this many threads
    :param nsamples: start using this many samples
    :param pruner_method: NOTE(review): accepted but not referenced in this body —
        confirm whether callers rely on it
    :param StrategizerFactory: called with a preprocessing block size, returns a
        mixin class combined with ``PruningStrategizer``
    :param dump_filename: write strategies to this filename
    :returns: ``(strategies, times)``
    """
    if dump_filename is None:
        dump_filename = "default-strategies-%s.json" % git_revision

    if existing_strategies is not None:
        strategies = existing_strategies
        times = [None] * len(strategies)
    else:
        strategies = []
        times = []

    # pad with trivial strategies up to min_block_size
    for i in range(len(strategies), min_block_size):
        strategies.append(Strategy(i, [], []))
        times.append(None)

    strategizer = PruningStrategizer

    for block_size in range(min_block_size, max_block_size + 1):
        logger.info("= block size: %3d, samples: %3d =", block_size, nsamples)

        state = []

        # start the preprocessing search slightly below the previous best choice
        try:
            p = max(strategies[-1].preprocessing_block_sizes[-1] - 4, 2)
        except (IndexError, ):
            p = 2

        prev_best_total_time = None
        while p < block_size:
            if p >= 4:
                # dynamically mix a preprocessing factory for block size p into
                # the pruning strategizer
                strategizer_p = type("PreprocStrategizer-%d" % p,
                                     (strategizer, StrategizerFactory(p)), {})
            else:
                strategizer_p = strategizer

            strategy, stats, queries = discover_strategy(
                block_size,
                strategizer_p,
                strategies,
                nthreads=nthreads,
                nsamples=nsamples,
            )

            # average the timings over the successful samples
            stats = [stat for stat in stats if stat is not None]

            total_time = [float(stat.data["cputime"]) for stat in stats]
            svp_time = [
                float(stat.find("enumeration").data["cputime"]) for stat in stats
            ]
            preproc_time = [
                float(stat.find("preprocessing").data["cputime"]) for stat in stats
            ]

            total_time = sum(total_time) / len(total_time)
            svp_time = sum(svp_time) / len(svp_time)
            preproc_time = sum(preproc_time) / len(preproc_time)
            state.append((total_time, strategy, stats, strategizer,
                          queries))
            logger.info("%10.6fs, %10.6fs, %10.6fs, %s", total_time, preproc_time, svp_time, strategy)

            # give up on this block size once we are clearly (30%) worse than
            # the best preprocessing parameter seen so far
            if prev_best_total_time and 1.3 * prev_best_total_time < total_time:
                break
            p += 2
            if not prev_best_total_time or prev_best_total_time > total_time:
                prev_best_total_time = total_time

        best = find_best(state)
        total_time, strategy, stats, strategizer, queries = best

        strategies.append(strategy)
        dump_strategies_json(dump_filename, strategies)
        times.append((total_time, stats, queries))

        logger.info("")
        logger.info("block size: %3d, time: %10.6fs, strategy: %s", block_size, total_time, strategy)
        logger.info("")

        # as instances become slower, halve the number of samples (down to a floor)
        if total_time > 0.1 and nsamples > max(2 * nthreads, 8):
            nsamples //= 2

    return strategies, times
def discover_strategy(block_size, Strategizer, strategies, nthreads=1, nsamples=50):
    """Discover a strategy for ``block_size`` using ``Strategizer``.

    Spawns one worker process per sample; each worker communicates over a pipe,
    first asking for preprocessing parameters, then for pruning parameters.

    :param block_size: block size to try
    :param Strategizer: strategizer to use
    :param strategies: strategies for smaller block sizes
    :param nthreads: number of workers run concurrently
    :param nsamples: number of lattice bases to consider
    :returns: ``(strategy, stats, queries)``
    """
    connections = []
    processes = []
    k = nthreads
    m = nsamples

    strategizer = Strategizer(block_size)

    # everybody is alive in the beginning
    alive = range(m)

    return_queue = Queue()

    for i in range(m):
        manager, worker = Pipe()
        connections.append((manager, worker))
        strategies_ = list(strategies)
        strategies_.append(Strategizer.Strategy(block_size, worker))
        # note: success probability, rerandomisation density etc. can be adapted here
        param = Param(block_size=block_size, strategies=strategies_, flags=BKZ.GH_BND)
        # seed each worker deterministically from (block_size, sample index)
        process = Process(target=worker_process, args=(2**16 * block_size + i, param, return_queue))
        processes.append(process)

    callback = [None] * m
    # run the workers in batches of k
    for chunk in chunk_iterator(alive, k):
        for i in chunk:
            process = processes[i]
            process.start()
            manager, worker = connections[i]
            # close the worker end in this process so only the manager end remains
            worker.close()
            connections[i] = manager

        # wait for `k` responses
        for i in chunk:
            callback[i] = connections[i].recv()

    assert all(callback)  # everybody wants preprocessing parameters

    preproc_params = strategizer(callback)
    callback = callback_roundtrip(alive, k, connections, preproc_params)
    assert all(callback)  # everybody wants pruning parameters

    pruning_params = strategizer(callback)
    callback = callback_roundtrip(alive, k, connections, pruning_params)
    assert not any(callback)  # no more questions

    strategy = Strategy(block_size=block_size,
                        preprocessing_block_sizes=preproc_params,
                        pruning_parameters=pruning_params)

    # side effect: joins any finished worker processes
    active_children()

    stats = []
    for i in range(m):
        stats.append(return_queue.get())

    return strategy, tuple(stats), tuple(strategizer.queries)
# NOTE(review): this fragment appears to belong to a larger CLI driver — the
# names ``strategiess``, ``strategies`` and ``args`` are defined outside the
# visible chunk, and the original (collapsed) line layout makes the exact
# statement nesting ambiguous; confirm against the full file.
strategiess.append(load_strategies_json(strategies.encode("ascii")))

# refuse to run a comparison with nothing to compare
if not len(strategiess):
    raise ValueError("You must provide at least one strategy to compare.")

results = compare_strategies(
    strategiess,
    jobs=args.jobs,
    nsamples=args.samples,
    min_block_size=args.min_block_size,
    max_block_size=args.max_block_size,
    threads=args.threads,
)

# collect per-block-size result summaries for JSON output
json_dict = OrderedDict()
best = [Strategy(bs) for bs in range(args.max_block_size + 1)]

for result in results:
    if not result:
        continue
    json_dict[result[0]["strategy"].block_size] = []
    min_t = None  # NOTE(review): assigned but not used within this visible chunk
    for entry in result:
        d = OrderedDict()
        d["name"] = str(entry["strategy"])
        d["total time"] = entry["total time"]
        d["length"] = entry["length"]
        json_dict[result[0]["strategy"].block_size].append(d)
def svp_challenge(
    upper_limit: "compute up to this dimension (inclusive)",
    strategies_and_costs: "previously computed strategies and costs to extend",
    lower_limit: """compute starting at this dimension, if ``None`` lowest unknown dimension is chosen.""" = None,
    dump_filename: """results are regularly written to this filename, if ``None`` then
                ``data/fplll-estimates-{lattice_type}.sobj`` is used.""" = None,
    ncores: "number of cores to use in parallel" = 4,
):
    """
    Estimate enumeration costs for SVP-challenge instances, searching over
    preprocessing block sizes and writing the running results to
    ``dump_filename`` after each dimension.
    """
    from cost import _pruner_precision

    if dump_filename is None:
        dump_filename = os.path.join("data", "fplll-simulations,svp-challenge.sobj")

    if strategies_and_costs is not None:
        try:
            strategies, costs = strategies_and_costs
        except ValueError:
            # not a pair — treat it as a filename of a pickled (strategies, costs)
            # fix: use a context manager so the file handle is closed
            with open(strategies_and_costs, "rb") as fh:
                strategies, costs = pickle.load(fh)
    else:
        # seed with trivial strategies for dimensions 0, 1, 2
        costs, strategies = [], []
        for i in range(3):
            strategies.append(Strategy(i, [], []))
            costs.append({"total cost": 0.0})

    # bug fix: the signature documents that ``lower_limit is None`` picks the
    # lowest unknown dimension, but ``range(None, …)`` raised TypeError before
    if lower_limit is None:
        lower_limit = len(strategies)

    if ncores > 1:
        workers = Pool(ncores)

    scc = OrderedDict()
    for d in range(lower_limit, upper_limit + 1):
        try:
            r = load_svp_challenge_r(d, seed=0)
        except FileNotFoundError:
            # no challenge instance available for this dimension
            continue
        float_type, precision = _pruner_precision(d)

        # start the preprocessing search slightly below the previously known choice
        try:
            start = max(strategies[d].preprocessing_block_sizes[0] - 16, 2)
        except (KeyError, IndexError):
            start = 2
        stop = d

        best = None
        # evaluate ncores candidate preprocessing block sizes per batch
        for giant_step in range(start, stop, ncores):
            jobs, results = [], []
            for baby_step in range(giant_step, min(stop, giant_step + ncores)):
                jobs.append((r, baby_step, strategies, costs, float_type))
            if ncores == 1:
                for job in jobs:
                    results.append(cost_kernel(job))
            else:
                results = workers.map(cost_kernel, jobs)

            do_break = False
            for cost in results:
                if best is None or cost["total cost"] < best["total cost"]:
                    best = cost
                # stop once candidates are clearly (2x) worse than the best
                if cost["total cost"] > 2 * best["total cost"]:
                    do_break = True
                    break
            if do_break:
                break

        scc[d] = best
        logging.info(
            "%3d :: %5.1f, %3d" % (d, log(best["total cost"], 2), best["preprocessing block size"])
        )
        # checkpoint after every dimension; fix: close the file handle
        with open(dump_filename, "wb") as fh:
            pickle.dump(scc, fh)