def extend(
    upper_limit: "compute up to this dimension (inclusive)",
    strategies_and_costs: "extend previously computed data",
    lower_limit: """compute starting at this dimension; if ``None``, the
    lowest unknown dimension is chosen.""" = None,
    preprocessing: "function for selecting preprocessing block size" = "0.8605 * d - 14.04",
    lattice_type: "one of 'qary', 'qary-lv' or 'block'" = "qary",
    dump_filename: """results are regularly written to this filename; if
    ``None``, ``../data/fplll-estimates-{lattice_type}.sobj`` is used.""" = None,
    ncores: "number of cores to use in parallel" = 4,
    greedy: "use Greedy pruning strategy" = False,
):
    """Estimate the cost of enumeration for a fixed preprocessing block size, as a
    function of the dimension.

    """
    dump_filename, strategies, costs, lower_limit = _prepare_parameters(
        dump_filename, strategies_and_costs, lower_limit, lattice_type, greedy=greedy
    )
    # the default is a linear fit; the string is turned into a function of d
    preprocessing = eval("lambda d: round({})".format(preprocessing))

    if ncores > 1:
        workers = Pool(ncores)

    for step in range(lower_limit, upper_limit + 1, ncores):
        jobs, results = [], []
        for i, d in enumerate(range(step, min(step + ncores, upper_limit + 1))):
            float_type = _pruner_precision(d, greedy)
            r = sample_r(d, lattice_type=lattice_type)
            preproc = preprocessing(d)
            # try the predicted preprocessing block size and its two neighbours
            for offset in (-1, 0, 1):
                jobs.append((r, preproc + offset, strategies + [None] * i, costs, float_type, greedy))

        if ncores == 1:
            for job in jobs:
                results.append(cost_kernel(job))
        else:
            results = workers.map(cost_kernel, jobs)

        # keep the cheapest of the three candidates per dimension
        for cost, strategy in results:
            try:
                if costs[strategy.block_size]["total cost"] > cost["total cost"]:
                    strategies[strategy.block_size] = strategy
                    costs[strategy.block_size] = cost
            except IndexError:
                strategies.append(strategy)
                costs.append(cost)

        for d in reversed(range(len(jobs) // 3)):
            print(
                "%3d :: %5.1f %s"
                % (
                    strategies[-d - 1].block_size,
                    log(costs[-d - 1]["total cost"], 2),
                    strategies[-d - 1],
                )
            )

        pickle.dump((strategies, costs), open(dump_filename, "wb"))
        dump_strategies_json(dump_filename.replace(".sobj", "-strategies.json"), strategies)

    return strategies, costs
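# Illustrative usage sketch, not part of the original module: resume the
# fixed-preprocessing estimates from an earlier dump and extend them to
# dimension 120 on eight cores. The pickle filename is an assumption that
# matches the default documented in ``extend`` above.
#
#     import pickle
#     with open("../data/fplll-estimates-qary.sobj", "rb") as fh:
#         previous = pickle.load(fh)
#     strategies, costs = extend(120, strategies_and_costs=previous, ncores=8)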
def block_strategize(
    upper_limit: "compute up to this dimension (inclusive)",
    lower_limit: """compute starting at this dimension; if ``None``, the
    lowest unknown dimension is chosen.""" = None,
    c: "overshoot parameter" = 0.25,
    strategies_and_costs: "previously computed strategies and costs to extend" = None,
    lattice_type: "one of 'qary' or 'qary-lv'" = "qary",
    dump_filename: """results are regularly written to this filename; if
    ``None``, ``data/fplll-block-simulations-{lattice_type}.sobj`` is used.""" = None,
    ncores: "number of cores to use in parallel" = 4,
    gh_factor: "set target_norm^2 to gh_factor * gh^2" = 1.00,
    rb: "compute pruning parameters for `GH^(i/rb)` for `i in -rb, …, rb`" = 1,
    greedy: "use Greedy pruning strategy" = False,
    sd: "use self-dual strategy" = False,
    preproc_loops: "number of preprocessing tours" = 2,
    ignore_preproc_cost: "assume all preprocessing has the cost of LLL regardless of block size" = False,
):
    """Estimate the cost of enumeration."""
    dump_filename, strategies, costs, lower_limit = _prepare_parameters(
        dump_filename,
        c,
        strategies_and_costs,
        lower_limit,
        lattice_type,
        preproc_loops,
        greedy,
        sd,
        ignore_preproc_cost,
    )

    if ncores > 1:
        workers = Pool(ncores)

    from cost import sample_r, _pruner_precision

    for d in range(lower_limit, upper_limit + 1):
        D = int((1 + c) * d + 1)
        r = sample_r(D, lattice_type=lattice_type)
        float_type = _pruner_precision(d, greedy)
        try:
            start = max(strategies[d - 1].preprocessing_block_sizes[-1], 2)
        except IndexError:
            start = 2
        if d < 60:
            stop = d
        else:
            stop = min(start + max(8, ncores), d)
        best = None
        for giant_step in range(start, stop, ncores):
            jobs, results = [], []
            for baby_step in range(giant_step, min(stop, giant_step + ncores)):
                opts = {
                    "greedy": greedy,
                    "sd": sd,
                    "gh_factor": gh_factor,
                    "float_type": float_type,
                    "radius_bound": rb,
                    "preproc_loops": preproc_loops,
                    "ignore_preproc_cost": ignore_preproc_cost,
                }
                jobs.append((r, d, c, baby_step, strategies, costs, opts))

            if ncores == 1:
                for job in jobs:
                    results.append(cost_kernel(job))
            else:
                results = workers.map(cost_kernel, jobs)

            do_break = False
            for cost, strategy in results:
                logging.debug(
                    "%3d :: C: %5.1f, P: %5.1f c: %.2f, %s"
                    % (d, log(cost["total cost"], 2), log(cost["preprocessing"], 2), cost["c"], strategy)
                )
                if best is None or cost["total cost"] < best[0]["total cost"]:
                    best = cost, strategy
                # stop exploring larger preprocessing block sizes once the
                # cost clearly moves away from the best seen so far
                if cost["total cost"] > 1.1 * best[0]["total cost"]:
                    do_break = True
                    break
            if do_break:
                break

        costs.append(best[0])
        strategies.append(best[1])
        logging.info(
            "%3d :: C: %5.1f, P: %5.1f c: %.2f, %s"
            % (d, log(costs[-1]["total cost"], 2), log(costs[-1]["preprocessing"], 2), costs[-1]["c"], strategies[-1])
        )
        pickle.dump((strategies, costs), open(dump_filename, "wb"))
        dump_strategies_json(dump_filename.replace(".sobj", "-strategies.json"), strategies)

    return strategies, costs
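# Illustrative usage sketch, not part of the original module: simulate
# strategies for block reduction up to dimension 100 with the default 25%
# overshoot and a relaxed target norm. The argument values are assumptions;
# only the keyword names come from the signature above.
#
#     strategies, costs = block_strategize(100, c=0.25, gh_factor=1.1, ncores=8)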
def strategize(
    upper_limit: "compute up to this dimension (inclusive)",
    lower_limit: """compute starting at this dimension; if ``None``, the
    lowest unknown dimension is chosen.""" = None,
    strategies_and_costs: "previously computed strategies and costs to extend" = None,
    lattice_type: "one of 'qary' or 'qary-lv'" = "qary",
    dump_filename: """results are regularly written to this filename; if
    ``None``, ``../data/fplll-estimates-{lattice_type}.sobj`` is used.""" = None,
    ncores: "number of cores to use in parallel" = 4,
    greedy: "use Greedy pruning strategy" = False,
    preproc_loops: "number of preprocessing tours" = 2,
):
    """Estimate the cost of enumeration."""
    dump_filename, strategies, costs, lower_limit = _prepare_parameters(
        dump_filename, strategies_and_costs, lower_limit, lattice_type, greedy=greedy
    )

    if ncores > 1:
        workers = Pool(ncores)

    for d in range(lower_limit, upper_limit + 1):
        r = sample_r(d, lattice_type=lattice_type)
        float_type = _pruner_precision(d, greedy)
        try:
            start = strategies[d - 1].preprocessing_block_sizes[-1]
        except IndexError:
            start = 2
        if d < 60:
            stop = d
        else:
            stop = start + max(8, ncores)
        best = None
        for giant_step in range(start, stop, ncores):
            jobs, results = [], []
            for baby_step in range(giant_step, min(stop, giant_step + ncores)):
                opts = {
                    "greedy": greedy,
                    "float_type": float_type,
                    "preproc_loops": preproc_loops,
                }
                jobs.append((r, baby_step, strategies, costs, opts))

            if ncores == 1:
                for job in jobs:
                    results.append(cost_kernel(job))
            else:
                results = workers.map(cost_kernel, jobs)

            do_break = False
            for cost, strategy in results:
                if best is None or cost["total cost"] < best[0]["total cost"]:
                    best = cost, strategy
                if cost["total cost"] > 1.1 * best[0]["total cost"]:
                    do_break = True
                    break
            if do_break:
                break

        costs.append(best[0])
        strategies.append(best[1])
        logging.info("%3d :: %5.1f %s" % (d, log(costs[-1]["total cost"], 2), strategies[-1]))
        pickle.dump((strategies, costs), open(dump_filename, "wb"))
        dump_strategies_json(dump_filename.replace(".sobj", "-strategies.json"), strategies)

    return strategies, costs
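# Illustrative usage sketch, not part of the original module: compute
# simulated strategies for 'qary-lv' lattices up to dimension 90, using the
# Greedy pruner on a single core; the argument values are assumptions.
#
#     strategies, costs = strategize(90, lattice_type="qary-lv", greedy=True, ncores=1)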
def strategize(
    max_block_size,
    existing_strategies=None,
    min_block_size=3,
    nthreads=1,
    nsamples=50,
    pruner_method="hybrid",
    StrategizerFactory=ProgressivePreprocStrategizerFactory,
    dump_filename=None,
):
    """
    Compute strategies, each consisting of *one* preprocessing block size + pruning.

    :param max_block_size: maximum block size to consider
    :param existing_strategies: extend these previously computed strategies
    :param min_block_size: start at this block size
    :param nthreads: use this many threads
    :param nsamples: start using this many samples
    :param pruner_method: method used by the pruner
    :param StrategizerFactory: factory producing the preprocessing strategizers
    :param dump_filename: write strategies to this filename
    """
    if dump_filename is None:
        dump_filename = "default-strategies-%s.json" % git_revision

    if existing_strategies is not None:
        strategies = existing_strategies
        times = [None] * len(strategies)
    else:
        strategies = []
        times = []

    # pad with trivial strategies up to the first block size we consider
    for i in range(len(strategies), min_block_size):
        strategies.append(Strategy(i, [], []))
        times.append(None)

    strategizer = PruningStrategizer

    for block_size in range(min_block_size, max_block_size + 1):
        logger.info("= block size: %3d, samples: %3d =", block_size, nsamples)

        state = []
        try:
            p = max(strategies[-1].preprocessing_block_sizes[-1] - 4, 2)
        except IndexError:
            p = 2

        prev_best_total_time = None
        while p < block_size:
            if p >= 4:
                strategizer_p = type(
                    "PreprocStrategizer-%d" % p,
                    (strategizer, StrategizerFactory(p)),
                    {},
                )
            else:
                strategizer_p = strategizer

            strategy, stats, queries = discover_strategy(
                block_size, strategizer_p, strategies, nthreads=nthreads, nsamples=nsamples
            )

            stats = [stat for stat in stats if stat is not None]

            total_time = [float(stat.data["cputime"]) for stat in stats]
            svp_time = [float(stat.find("enumeration").data["cputime"]) for stat in stats]
            preproc_time = [float(stat.find("preprocessing").data["cputime"]) for stat in stats]

            total_time = sum(total_time) / len(total_time)
            svp_time = sum(svp_time) / len(svp_time)
            preproc_time = sum(preproc_time) / len(preproc_time)

            state.append((total_time, strategy, stats, strategizer, queries))
            logger.info("%10.6fs, %10.6fs, %10.6fs, %s", total_time, preproc_time, svp_time, strategy)

            # stop increasing the preprocessing block size once runtimes
            # clearly exceed the best observed so far
            if prev_best_total_time and 1.3 * prev_best_total_time < total_time:
                break
            p += 2
            if not prev_best_total_time or prev_best_total_time > total_time:
                prev_best_total_time = total_time

        best = find_best(state)
        total_time, strategy, stats, strategizer, queries = best

        strategies.append(strategy)
        dump_strategies_json(dump_filename, strategies)
        times.append((total_time, stats, queries))

        logger.info("")
        logger.info("block size: %3d, time: %10.6fs, strategy: %s", block_size, total_time, strategy)
        logger.info("")

        # halve the sample count as instances become more expensive
        if total_time > 0.1 and nsamples > max(2 * nthreads, 8):
            nsamples //= 2

    return strategies, times
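# Illustrative usage sketch, not part of the original module: run the
# experimental strategizer up to block size 60 on four threads. That
# ``load_strategies_json`` accepts a filename as used here is an assumption
# based on its use elsewhere in this codebase.
#
#     existing = load_strategies_json("default-strategies.json")
#     strategies, times = strategize(60, existing_strategies=existing, nthreads=4)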
    strategiess.append(load_strategies_json(strategies))

results = compare_strategies(
    strategiess,
    nthreads=args.threads,
    nsamples=args.samples,
    min_block_size=args.min_block_size,
    max_block_size=args.max_block_size,
)

json_dict = OrderedDict()
best = [Strategy(bs) for bs in range(args.max_block_size + 1)]

# collect all timings per block size and remember the fastest strategy
for result in results:
    if not result:
        continue
    json_dict[result[0]["strategy"].block_size] = []
    min_t = None
    for entry in result:
        d = OrderedDict()
        d["name"] = str(entry["strategy"])
        d["total time"] = entry["total time"]
        d["length"] = entry["length"]
        json_dict[result[0]["strategy"].block_size].append(d)
        if min_t is None or min_t > entry["total time"]:
            best[result[0]["strategy"].block_size] = entry["strategy"]
            min_t = entry["total time"]

json_name = "compare-%s.json" % (name)
json.dump(json_dict, open(json_name, "w"), indent=4, sort_keys=False)

best_name = "compare-best-%s.json" % (name)
dump_strategies_json(best_name, best)
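# Illustrative follow-up sketch, not part of the original script: the
# comparison JSON written above can be re-read with the standard ``json``
# module for quick inspection; the field names match those set above.
#
#     comparison = json.load(open(json_name))
#     for block_size, entries in comparison.items():
#         print(block_size, min(entry["total time"] for entry in entries))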