def plain_sieve():
    """
    Run a plain sieve in the full dimension directly.
    """
    description = plain_sieve.__doc__

    args, all_params = parse_args(description)
    stats = run_all(plain_sieve_kernel, list(all_params.values()),
                    lower_bound=args.lower_bound,
                    upper_bound=args.upper_bound,
                    step_size=args.step_size,
                    trials=args.trials,
                    workers=args.workers,
                    seed=args.seed)

    # Map each parameter set back to its human-readable name for reporting.
    # (This line was previously duplicated verbatim; the second copy was a no-op.)
    inverse_all_params = OrderedDict([(v, k) for (k, v) in six.iteritems(all_params)])
    stats = sanitize_params_names(stats, inverse_all_params)

    fmt = "{name:50s} :: n: {n:2d}, cputime {cputime:7.4f}s, walltime: {walltime:7.4f}s, |db|: 2^{avg_max:.2f}"
    profiles = print_stats(fmt, stats, ("cputime", "walltime", "avg_max"),
                           extractf={"avg_max": lambda n, params, stat: db_stats(stat)[0]})

    output_profiles(args.profile, profiles)

    if args.pickle:
        pickler.dump(stats, open("plain-sieve-%d-%d-%d-%d.sobj" %
                                 (args.lower_bound, args.upper_bound, args.step_size, args.trials), "wb"))
def asvp():
    """
    Run a Workout until 1.05-approx-SVP on matrices with dimensions in ``range(lower_bound, upper_bound, step_size)``.
    """
    description = asvp.__doc__

    args, all_params = parse_args(description,
                                  load_matrix=None,
                                  verbose=True,
                                  challenge_seed=0,
                                  workout__dim4free_min=30,
                                  workout__dim4free_dec=2)

    stats = run_all(asvp_kernel, list(all_params.values()),
                    lower_bound=args.lower_bound,
                    upper_bound=args.upper_bound,
                    step_size=args.step_size,
                    trials=args.trials,
                    workers=args.workers,
                    seed=args.seed)

    # ``dict.iteritems`` is Python-2 only; use six for 2/3 compatibility,
    # matching the other drivers in this file.
    inverse_all_params = OrderedDict([(v, k) for (k, v) in six.iteritems(all_params)])

    for (n, params) in stats:
        stat = stats[(n, params)]
        # Average the per-node timings / quality over all trials.
        cputime = sum([float(node["cputime"]) for node in stat]) / len(stat)
        walltime = sum([float(node["walltime"]) for node in stat]) / len(stat)
        quality = sum([float(node["quality"]) for node in stat]) / len(stat)
        fmt = "%48s :: m: %1d, n: %2d, cputime :%7.4fs, walltime :%7.4fs, quality : %2.4f"  # noqa
        logging.info(fmt % (inverse_all_params[params], params.threads, n, cputime, walltime, quality))

    if args.pickle:
        pickler.dump(stats, open("hkz-asvp-%d-%d-%d-%d.sobj" %
                                 (args.lower_bound, args.upper_bound, args.step_size, args.trials), "wb"))
def plain_sieve():
    """
    Run a plain sieve in the full dimension directly.
    """
    description = plain_sieve.__doc__

    args, all_params = parse_args(description)
    stats = run_all(plain_sieve_kernel, list(all_params.values()),
                    lower_bound=args.lower_bound,
                    upper_bound=args.upper_bound,
                    step_size=args.step_size,
                    trials=args.trials,
                    workers=args.workers,
                    seed=args.seed)

    # ``dict.iteritems`` is Python-2 only; use six for 2/3 compatibility,
    # matching the other drivers in this file.
    inverse_all_params = OrderedDict([(v, k) for (k, v) in six.iteritems(all_params)])

    for (n, params) in stats:
        stat = stats[(n, params)]
        # Average the per-node timings over all trials; db_stats yields the
        # average / maximal database sizes (log2).
        cputime = sum([float(node["cputime"]) for node in stat]) / len(stat)
        walltime = sum([float(node["walltime"]) for node in stat]) / len(stat)
        avr_db, max_db = db_stats(stat)
        fmt = "%48s :: m: %1d, n: %2d, cputime :%7.4fs, walltime :%7.4fs, avr_max |db|: 2^%2.2f, max_max db |db|: 2^%2.2f"  # noqa
        logging.info(fmt % (inverse_all_params[params], params.threads, n, cputime, walltime, avr_db, max_db))

    if args.pickle:
        pickler.dump(stats, open("plain-sieve-%d-%d-%d-%d.sobj" %
                                 (args.lower_bound, args.upper_bound, args.step_size, args.trials), "wb"))
def asvp(n, params, threads):
    """
    Programmatic approx-SVP driver: run ``asvp_kernel`` once in dimension
    ``n`` with the given siever ``params`` and return the kernel's result.
    """
    # Single experiment in dimension ``n`` only; run_all's bounds are set so
    # that no other dimension is visited (upper bound 0, one trial, one worker).
    rng_seed = int.from_bytes(os.urandom(8), 'big')
    named_params = OrderedDict({f"'threads': {threads}, ": params})

    stats = run_all(
        asvp_kernel,
        list(named_params.values()),
        lower_bound=n,       # lowest lattice dimension to consider (inclusive)
        upper_bound=0,       # upper bound on lattice dimension to consider (exclusive)
        step_size=2,         # increment lattice dimension in these steps
        trials=1,            # number of experiments to run per dimension
        workers=1,           # number of parallel experiments to run
        seed=rng_seed,       # randomness seed
    )

    # Report under the human-readable parameter names.
    name_of = OrderedDict((v, k) for (k, v) in named_params.items())
    stats = sanitize_params_names(stats, name_of)

    fmt = ("{name:20s} :: n: {n:2d}, cputime {cputime:7.4f}s, walltime: {walltime:7.4f}s, "
           "flast: {flast:3.2f}, |db|: 2^{avg_max:.2f}")
    print_stats(fmt, stats, ("cputime", "walltime", "flast", "avg_max"),
                extractf={"avg_max": lambda n, params, stat: db_stats(stat)[0]})

    first_stat = list(stats.values())[0][0]
    return first_stat.data['res']
def lwe():
    """
    Attempt to solve an lwe challenge.
    """
    description = lwe.__doc__

    args, all_params = parse_args(description,
                                  lwe__alpha=0.005,
                                  lwe__m=None,
                                  lwe__goal_margin=1.5,
                                  lwe__svp_bkz_time_factor=1,
                                  bkz__blocksizes=None,
                                  bkz__tours=1,
                                  bkz__jump=1,
                                  bkz__extra_dim4free=12,
                                  bkz__fpylll_crossover=51,
                                  bkz__dim4free_fun="default_dim4free_fun",
                                  pump__down_sieve=True,
                                  dummy_tracer=True,  # set to control memory
                                  verbose=True
                                  )

    # Wrap values() in list() for consistency with the other drivers in this
    # file (a dict view is not a sequence on Python 3).
    stats = run_all(lwe_kernel, list(all_params.values()),  # noqa
                    lower_bound=args.lower_bound,
                    upper_bound=args.upper_bound,
                    step_size=args.step_size,
                    trials=args.trials,
                    workers=args.workers,
                    seed=args.seed)
def svp():
    """
    Run a progressive until 1.05-approx-SVP on matrices with dimensions in ``range(lower_bound, upper_bound, step_size)``.
    """
    # The docstring doubles as the command-line description.
    opts, param_sets = parse_args(svp.__doc__, workout__dim4free_dec=2, challenge_seed=0)

    run_all(
        svp_kernel,
        list(param_sets.values()),
        lower_bound=opts.lower_bound,
        upper_bound=opts.upper_bound,
        step_size=opts.step_size,
        trials=opts.trials,
        workers=opts.workers,
        seed=opts.seed,
    )
def bkz_tour():
    """
    Run bkz tours.

    .. note :: that by default no information is printed.
        To enable set ``--dummy-tracer False`` and ``--verbose``.
    """
    # The docstring doubles as the command-line description.
    args, all_params = parse_args(
        bkz_tour.__doc__,
        bkz__alg="pump_and_jump",
        bkz__blocksizes="40:51:2",
        bkz__pre_blocksize=39,
        bkz__tours=1,
        bkz__extra_dim4free=0,
        bkz__jump=1,
        bkz__dim4free_fun="default_dim4free_fun",
        slide__overlap=1,
        pump__down_sieve=True,
        challenge_seed=0,
        dummy_tracer=False,  # set to control memory
        verbose=False,
    )

    stats = run_all(
        bkz_kernel,
        list(all_params.values()),
        lower_bound=args.lower_bound,
        upper_bound=args.upper_bound,
        step_size=args.step_size,
        trials=args.trials,
        workers=args.workers,
        seed=args.seed,
    )

    # Report statistics under human-readable parameter names.
    name_by_params = OrderedDict((v, k) for (k, v) in six.iteritems(all_params))
    stats = sanitize_params_names(stats, name_by_params)

    fmt = "{name:50s} :: n: {n:2d}, cputime {cputime:7.4f}s, walltime: {walltime:7.4f}s, slope: {slope:1.5f}, |db|: 2^{avg_max:.2f}"
    profiles = print_stats(
        fmt,
        stats,
        ("cputime", "walltime", "slope", "avg_max"),
        extractf={"avg_max": lambda n, params, stat: db_stats(stat)[0]},
    )
    output_profiles(args.profile, profiles)

    if args.pickle:
        dump_name = "bkz-%d-%d-%d-%d.sobj" % (args.lower_bound, args.upper_bound,
                                              args.step_size, args.trials)
        pickler.dump(stats, open(dump_name, "wb"))
def bkz_tour():
    """
    Run bkz tours.

    .. note :: that by default no information is printed.
        To enable set ``--dummy-tracer False`` and ``--verbose``.
    """
    # The docstring doubles as the command-line description.
    args, all_params = parse_args(
        bkz_tour.__doc__,
        bkz__alg="pump_and_jump",
        bkz__blocksizes="40:51:2",
        bkz__pre_blocksize=39,
        bkz__tours=1,
        bkz__extra_dim4free=0,
        bkz__jump=1,
        bkz__dim4free_fun="default_dim4free_fun",
        pump__down_sieve=True,
        challenge_seed=0,
        dummy_tracer=True,  # set to control memory
        verbose=False,
    )

    stats = run_all(
        bkz_kernel,
        list(all_params.values()),
        lower_bound=args.lower_bound,
        upper_bound=args.upper_bound,
        step_size=args.step_size,
        trials=args.trials,
        workers=args.workers,
        seed=args.seed,
    )

    name_by_params = OrderedDict((v, k) for (k, v) in six.iteritems(all_params))

    # Merge runs that differ only in their challenge seed: strip the seed from
    # the parameter-set name and pool the trial lists under the stripped key.
    merged = OrderedDict()
    for (n, params), trial_list in six.iteritems(stats):
        label = re.sub("'challenge_seed': [0-9]+,", "", name_by_params[params])
        params = params.new(challenge_seed=None)
        merged[(n, label)] = merged.get((n, label), []) + trial_list
    stats = merged

    for (n, label) in stats:
        trial_list = stats[(n, label)]
        if not trial_list[0]:  # may be None if dummy_tracer is used
            continue
        n_trials = len(trial_list)
        avg_cpu = sum(float(node["cputime"]) for node in trial_list) / n_trials
        avg_wall = sum(float(node["walltime"]) for node in trial_list) / n_trials
        fmt = "%48s :: n: %2d, cputime :%7.4fs, walltime :%7.4fs"
        logging.info(fmt % (label, n, avg_cpu, avg_wall))
def asvp():
    """
    Run a Workout until 1.05-approx-SVP on matrices with dimensions in ``range(lower_bound, upper_bound, step_size)``.
    """
    # The docstring doubles as the command-line description.
    args, all_params = parse_args(
        asvp.__doc__,
        load_matrix=None,
        verbose=True,
        challenge_seed=0,
        workout__dim4free_dec=3,
    )

    stats = run_all(asvp_kernel, list(all_params.values()),
                    lower_bound=args.lower_bound,
                    upper_bound=args.upper_bound,
                    step_size=args.step_size,
                    trials=args.trials,
                    workers=args.workers,
                    seed=args.seed)

    # Report statistics under human-readable parameter names.
    names = OrderedDict((v, k) for (k, v) in six.iteritems(all_params))
    stats = sanitize_params_names(stats, names)

    fmt = "{name:50s} :: n: {n:2d}, cputime {cputime:7.4f}s, walltime: {walltime:7.4f}s, flast: {flast:3.2f}, |db|: 2^{avg_max:.2f}"
    profiles = print_stats(fmt, stats, ("cputime", "walltime", "flast", "avg_max"),
                           extractf={"avg_max": lambda n, params, stat: db_stats(stat)[0]})
    output_profiles(args.profile, profiles)

    if args.pickle:
        dump_name = "svp-challenge-%d-%d-%d-%d.sobj" % (args.lower_bound, args.upper_bound,
                                                        args.step_size, args.trials)
        pickler.dump(stats, open(dump_name, "wb"))
def svp():
    """
    Run a progressive until exact-SVP is solved. The exact-SVP length must have been priorly determined using ./svp_exact_find_norm.py
    """
    description = svp.__doc__

    args, all_params = parse_args(description, challenge_seed=0, svp__alg="workout")
    stats = run_all(svp_kernel, list(all_params.values()),
                    lower_bound=args.lower_bound,
                    upper_bound=args.upper_bound,
                    step_size=args.step_size,
                    trials=args.trials,
                    workers=args.workers,
                    seed=args.seed)

    inverse_all_params = OrderedDict([(v, k) for (k, v) in six.iteritems(all_params)])

    # Merge statistics of runs that differ only in their challenge seed.
    stats2 = OrderedDict()
    for (n, params), v in six.iteritems(stats):
        params_name = inverse_all_params[params]
        params_name = re.sub("'challenge_seed': [0-9]+,", "", params_name)
        params = params.new(challenge_seed=None)
        stats2[(n, params_name)] = stats2.get((n, params_name), []) + v
    stats = stats2

    for (n, params) in stats:
        stat = stats[(n, params)]
        cputime = sum([float(node["cputime"]) for node in stat]) / len(stat)
        walltime = sum([float(node["walltime"]) for node in stat]) / len(stat)
        flast = sum([float(node["flast"]) for node in stat]) / len(stat)
        avr_db, max_db = db_stats(stat)
        # fixed: format string previously contained a stray ", ," double comma
        fmt = "%100s :: n: %2d, cputime :%7.4fs, walltime :%7.4fs, flast : %2.2f, avr_max db: 2^%2.2f, max_max db: 2^%2.2f"  # noqa
        logging.info(fmt % (params, n, cputime, walltime, flast, avr_db, max_db))

    if args.pickle:
        pickler.dump(
            stats,
            open(
                "hkz-svp-%d-%d-%d-%d.sobj" %
                (args.lower_bound, args.upper_bound, args.step_size, args.trials), "wb"))
def svp():
    """
    Run a progressive until exact-SVP is solved. The exact-SVP length must have been priorly determined using ./svp_exact_find_norm.py
    """
    # The docstring doubles as the command-line description.
    args, all_params = parse_args(svp.__doc__, challenge_seed=0, svp__alg="workout")

    stats = run_all(svp_kernel, list(all_params.values()),
                    lower_bound=args.lower_bound,
                    upper_bound=args.upper_bound,
                    step_size=args.step_size,
                    trials=args.trials,
                    workers=args.workers,
                    seed=args.seed)

    # Report statistics under human-readable parameter names.
    names = OrderedDict((v, k) for (k, v) in six.iteritems(all_params))
    stats = sanitize_params_names(stats, names)

    fmt = "{name:50s} :: n: {n:2d}, cputime {cputime:7.4f}s, walltime: {walltime:7.4f}s, flast: {flast:3.2f}, |db|: 2^{avg_max:.2f}"
    profiles = print_stats(fmt, stats, ("cputime", "walltime", "flast", "avg_max"),
                           extractf={"avg_max": lambda n, params, stat: db_stats(stat)[0]})
    output_profiles(args.profile, profiles)

    if args.pickle:
        dump_name = "svp-exact-%d-%d-%d-%d.sobj" % (args.lower_bound, args.upper_bound,
                                                    args.step_size, args.trials)
        pickler.dump(stats, open(dump_name, "wb"))
def full_sieve():
    """
    Run a full sieve (with some partial sieve as precomputation).
    """
    description = full_sieve.__doc__

    args, all_params = parse_args(description, challenge_seed=0)
    stats = run_all(
        full_sieve_kernel,
        list(all_params.values()),
        lower_bound=args.lower_bound,
        upper_bound=args.upper_bound,
        step_size=args.step_size,
        trials=args.trials,
        workers=args.workers,
        seed=args.seed,
    )

    # Report statistics under human-readable parameter names.
    inverse_all_params = OrderedDict([(v, k) for (k, v) in six.iteritems(all_params)])
    stats = sanitize_params_names(stats, inverse_all_params)

    fmt = "{name:50s} :: n: {n:2d}, cputime {cputime:7.4f}s, walltime: {walltime:7.4f}s, |db|: 2^{avg_max:.2f}"
    profiles = print_stats(
        fmt,
        stats,
        ("cputime", "walltime", "avg_max"),
        extractf={"avg_max": lambda n, params, stat: db_stats(stat)[0]},
    )
    output_profiles(args.profile, profiles)

    if args.pickle:
        pickler.dump(
            stats,
            open(
                "full-sieve-%d-%d-%d-%d.sobj" %
                (args.lower_bound, args.upper_bound, args.step_size, args.trials),
                "wb",
            ),
        )
def hkz():
    """
    Attempt HKZ reduction.
    """
    # The docstring doubles as the command-line description.
    args, all_params = parse_args(
        hkz.__doc__,
        challenge_seed=0,
        pump__down_sieve=True,
        pump__down_stop=9999,
        saturation_ratio=.8,
        pump__prefer_left_insert=10,
        workout__dim4free_min=0,
        workout__dim4free_dec=15,
    )

    stats = run_all(hkz_kernel, list(all_params.values()),
                    lower_bound=args.lower_bound,
                    upper_bound=args.upper_bound,
                    step_size=args.step_size,
                    trials=args.trials,
                    workers=args.workers,
                    seed=args.seed)

    # Report statistics under human-readable parameter names.
    names = OrderedDict((v, k) for (k, v) in six.iteritems(all_params))
    stats = sanitize_params_names(stats, names)

    fmt = "{name:50s} :: n: {n:2d}, cputime {cputime:7.4f}s, walltime: {walltime:7.4f}s, |db|: 2^{avg_max:.2f}"
    profiles = print_stats(fmt, stats, ("cputime", "walltime", "avg_max"),
                           extractf={"avg_max": lambda n, params, stat: db_stats(stat)[0]})
    output_profiles(args.profile, profiles)

    if args.pickle:
        dump_name = "hkz-%d-%d-%d-%d.sobj" % (args.lower_bound, args.upper_bound,
                                              args.step_size, args.trials)
        pickler.dump(stats, open(dump_name, "wb"))
def find_norm(load_matrix, n, threads=4, **kwds):
    """
    find `goal_r0` for a given matrix

    :param load_matrix: file that keeps the matrix
    :param n: dimension of the matrix
    :param threads: ... (default: 4)
    :param workout__dim4free_dec: By how much do we decrease dim4free at each
        iteration (default: 2)
    :param verbose: ... (default: False)
    :param n_matches: max match trials for kernel (default: 5)
    """
    # Run a single experiment in dimension ``n`` only.
    lower_bound = n  # lowest lattice dimension to consider (inclusive)
    upper_bound = 0  # upper bound on lattice dimension to consider (exclusive)
    step_size = 2    # increment lattice dimension in these steps
    trials = 1       # number of experiments to run per dimension
    workers = 1      # number of parallel experiments to run
    seed = int.from_bytes(os.urandom(8), 'big')  # randomness seed

    workout__dim4free_dec = kwds.get('workout__dim4free_dec', 2)
    verbose = kwds.get('verbose', False)
    n_matches = kwds.get('n_matches', 5)

    params = SieverParams(load_matrix=load_matrix, n_matches=n_matches,
                          threads=threads, verbose=verbose)
    params['workout/dim4free_dec'] = workout__dim4free_dec
    all_params = OrderedDict({f"'threads': {threads}, ": params})

    res = run_all(find_norm_kernel, list(all_params.values()),
                  lower_bound=lower_bound,
                  upper_bound=upper_bound,
                  step_size=step_size,
                  trials=trials,
                  workers=workers,
                  seed=seed)

    # The single (dimension, params) entry holds the found norm as its first item.
    goal_r0 = list(res.values())[0][0]
    return goal_r0
def full_sieve():
    """
    Run a full sieve (with some partial sieve as precomputation).
    """
    description = full_sieve.__doc__

    args, all_params = parse_args(description, trace=True)
    stats = run_all(full_sieve_kernel, list(all_params.values()),
                    lower_bound=args.lower_bound,
                    upper_bound=args.upper_bound,
                    step_size=args.step_size,
                    trials=args.trials,
                    workers=args.workers,
                    seed=args.seed)

    # ``dict.iteritems`` is Python-2 only; use six for 2/3 compatibility,
    # matching the other drivers in this file.
    inverse_all_params = OrderedDict([(v, k) for (k, v) in six.iteritems(all_params)])

    for (n, params) in stats:
        stat = stats[(n, params)]
        if stat[0] is None:
            # Trial ran with tracing disabled; nothing to report.
            logging.info("Trace disabled")
            continue
        cputime = sum([float(node["cputime"]) for node in stat]) / len(stat)
        walltime = sum([float(node["walltime"]) for node in stat]) / len(stat)
        avr_db, max_db = db_stats(stat)
        fmt = "%48s :: m: %1d, n: %2d, cputime :%7.4fs, walltime :%7.4fs, avr_max |db|: 2^%2.2f, max_max db |db|: 2^%2.2f"  # noqa
        logging.info(fmt % (inverse_all_params[params], params.threads, n, cputime, walltime, avr_db, max_db))

    if args.pickle:
        pickler.dump(
            stats,
            open(
                "full-sieve-%d-%d-%d-%d.sobj" %
                (args.lower_bound, args.upper_bound, args.step_size, args.trials), "wb"))
def ntru_kernel(arg0, params=None, seed=None):
    """
    Run the primal attack against Darmstadt LWE instance (n, alpha).

    :param n: the dimension of the LWE-challenge secret
    :param params: parameters for LWE:

        - lwe/alpha: the noise rate of the LWE-challenge

        - lwe/m: the number of samples to use for the primal attack

        - lwe/goal_margin: accept anything that is
          goal_margin * estimate(length of embedded vector)
          as an lwe solution

        - lwe/svp_bkz_time_factor: if > 0, run a larger pump when
          svp_bkz_time_factor * time(BKZ tours so far) is expected
          to be enough time to find a solution

        - bkz/blocksizes: given as low:high:inc perform BKZ reduction
          with blocksizes in range(low, high, inc) (after some light)
          prereduction

        - bkz/tours: the number of tours to do for each blocksize

        - bkz/jump: the number of blocks to jump in a BKZ tour after each pump

        - bkz/extra_dim4free: lift to indices extra_dim4free earlier in
          the lattice than the currently sieved block

        - bkz/fpylll_crossover: use enumeration based BKZ from fpylll
          below this blocksize

        - bkz/dim4free_fun: in blocksize x, try f(x) dimensions for free,
          give as 'lambda x: f(x)', e.g. 'lambda x: 11.5 + 0.075*x'

        - pump/down_sieve: sieve after each insert in the pump-down
          phase of the pump

        - dummy_tracer: use a dummy tracer which captures less information

        - verbose: print information throughout the lwe challenge attempt
    """
    # Pool.map only supports a single parameter
    if params is None and seed is None:
        n, params, seed = arg0
    else:
        n = arg0

    params = copy.copy(params)

    # params for underlying BKZ
    extra_dim4free = params.pop("bkz/extra_dim4free")
    jump = params.pop("bkz/jump")
    dim4free_fun = params.pop("bkz/dim4free_fun")
    pump_params = pop_prefixed_params("pump", params)
    fpylll_crossover = params.pop("bkz/fpylll_crossover")
    blocksizes = params.pop("bkz/blocksizes")
    tours = params.pop("bkz/tours")

    # flow of the lwe solver
    svp_bkz_time_factor = params.pop("lwe/svp_bkz_time_factor")
    goal_margin = params.pop("lwe/goal_margin")

    # generation of lwe instance and Kannan's embedding
    alpha = params.pop("lwe/alpha")
    m = params.pop("lwe/m")
    decouple = svp_bkz_time_factor > 0

    # misc
    dont_trace = params.pop("dummy_tracer")
    verbose = params.pop("verbose")

    filename = 'ntru_n_' + str(n) + '.txt'
    H, q = read_ntru_from_file(filename)

    print("-------------------------")
    print("Hybrid attack on NTRU n=%d" % n)

    # compute the attack parameters
    # NOTE(review): on Python 3, n/3 is a float; if 'w' must be an integer
    # weight, this should probably be 2*(n//3) -- confirm against the
    # estimator's expectations.
    paramset_NTRU1 = {'n': n, 'q': q, 'w': 2*(n/3)}
    print(paramset_NTRU1)
    beta, g, rt = plain_hybrid_compleixty(paramset_NTRU1, verbose=True)

    # fixed: the original passed an undefined name ``A``; the matrix loaded
    # above is ``H``.
    B = ntru_plain_hybrid_basis(H, g, q, m=m)

    g6k = Siever(B, params)
    print("GSO precision: ", g6k.M.float_type)

    if dont_trace:
        tracer = dummy_tracer
    else:
        tracer = SieveTreeTracer(g6k, root_label=("ntru"), start_clocks=True)

    d = g6k.full_n
    g6k.lll(0, g6k.full_n)
    slope = basis_quality(g6k.M)["/"]
    print("Intial Slope = %.5f\n" % slope)

    # NOTE(review): ``target_norm`` is used below but never defined in this
    # function's visible scope -- presumably a module-level value or a missing
    # computation from the embedding; confirm before running.
    T0 = time.time()
    T0_BKZ = time.time()
    for blocksize in blocksizes:
        for tt in range(tours):
            # BKZ tours
            if blocksize < fpylll_crossover:
                if verbose:
                    print("Starting a fpylll BKZ-%d tour. " % (blocksize), end=' ')
                    sys.stdout.flush()
                bkz = BKZReduction(g6k.M)
                par = fplll_bkz.Param(blocksize,
                                      strategies=fplll_bkz.DEFAULT_STRATEGY,
                                      max_loops=1)
                bkz(par)
            else:
                if verbose:
                    print("Starting a pnjBKZ-%d tour. " % (blocksize))
                pump_n_jump_bkz_tour(g6k, tracer, blocksize, jump=jump,
                                     verbose=verbose,
                                     extra_dim4free=extra_dim4free,
                                     dim4free_fun=dim4free_fun,
                                     goal_r0=target_norm,
                                     pump_params=pump_params)
            T_BKZ = time.time() - T0_BKZ

            if verbose:
                slope = basis_quality(g6k.M)["/"]
                fmt = "slope: %.5f, walltime: %.3f sec"
                print(fmt % (slope, time.time() - T0))

            g6k.lll(0, g6k.full_n)
            if g6k.M.get_r(0, 0) <= target_norm:
                break

            # overdoing n_max would allocate too much memory, so we are careful
            svp_Tmax = svp_bkz_time_factor * T_BKZ
            n_max = int(58 + 2.85 * log(svp_Tmax * params.threads)/log(2.))

            rr = [g6k.M.get_r(i, i) for i in range(d)]
            for n_expected in range(2, d-2):
                x = (target_norm/goal_margin) * n_expected/(1.*d)
                if 4./3 * gaussian_heuristic(rr[d-n_expected:]) > x:
                    break

            print("Without otf, would expect solution at pump-%d. n_max=%d in the given time." % (n_expected, n_max))  # noqa
            if n_expected >= n_max - 1:
                continue
            n_max += 1

            # Larger SVP
            llb = d - blocksize
            while gaussian_heuristic([g6k.M.get_r(i, i) for i in range(llb, d)]) < target_norm * (d - llb)/(1.*d):  # noqa
                llb -= 1

            f = d-llb-n_max
            if verbose:
                print("Starting svp pump_{%d, %d, %d}, n_max = %d, Tmax= %.2f sec" % (llb, d-llb, f, n_max, svp_Tmax))  # noqa
            pump(g6k, tracer, llb, d-llb, f, verbose=verbose,
                 goal_r0=target_norm * (d - llb)/(1.*d))

            if verbose:
                slope = basis_quality(g6k.M)["/"]
                fmt = "\n slope: %.5f, walltime: %.3f sec"
                print(fmt % (slope, time.time() - T0))
                print()

            g6k.lll(0, g6k.full_n)
            T0_BKZ = time.time()
            if g6k.M.get_r(0, 0) <= target_norm:
                break

        if g6k.M.get_r(0, 0) <= target_norm:
            # fixed: the original text contained a stray ``\"\"\"`` after this
            # ``return`` which turned the rest of the module into a string
            # literal; the function simply returns after writing the solution.
            print("Finished! TT=%.2f sec" % (time.time() - T0))
            print(g6k.M.B[0])
            alpha_ = int(alpha*1000)
            filename = 'lwechallenge/%03d-%03d-solution.txt' % (n, alpha_)
            fn = open(filename, "w")
            fn.write(str(g6k.M.B[0]))
            fn.close()
            return

    raise ValueError("No solution found.")


def ntru():
    """
    Attempt to solve an ntru instance.
    """
    description = ntru.__doc__

    args, all_params = parse_args(description,
                                  ntru__m=None,
                                  lwe__goal_margin=1.5,
                                  lwe__svp_bkz_time_factor=1,
                                  bkz__blocksizes=None,
                                  bkz__tours=1,
                                  bkz__jump=1,
                                  bkz__extra_dim4free=12,
                                  bkz__fpylll_crossover=51,
                                  bkz__dim4free_fun="default_dim4free_fun",
                                  pump__down_sieve=True,
                                  dummy_tracer=True,  # set to control memory
                                  verbose=True
                                  )

    # NOTE(review): ntru_kernel pops "lwe/alpha" but no ``lwe__alpha`` default
    # is declared here -- confirm parse_args supplies it from elsewhere.
    stats = run_all(ntru_kernel, list(all_params.values()),  # noqa
                    lower_bound=args.lower_bound,
                    upper_bound=args.upper_bound,
                    step_size=args.step_size,
                    trials=args.trials,
                    workers=args.workers,
                    seed=args.seed)
def solve_svp(A, goal_r0=None, threads=4, **kwds):
    """
    A G6K Exact-SVP Solver

    :param A: basis matrix (repr. in list)
    :param goal_r0: ... Quit when this is reached.
        If it's None, use the result of `find_norm` with `n_matches` = 3
        !!! may take long time running `find_norm` !!!
    :param threads: ... (default: 4)
    :param keep_tmpfile: keep the reduced matrix (default: False)
    :param load_matrix: filename for temp matrix file
    :param verbose: ... (default: True)
    :param alg: algorithm used to solve svp, chosen in 'enum', 'duc18',
        'workout'(default)
    :param debug: ... (default: False)
    """
    n = len(A)
    keep_tmpfile = kwds.get('keep_tmpfile', False)
    load_matrix = kwds.get('load_matrix', f'svpchallenge-{n}.txt')
    verbose = kwds.get('verbose', True)
    alg = kwds.get('alg', 'workout')
    debug = kwds.get('debug', False)

    # Run a single experiment in dimension ``n`` only.
    lower_bound = n  # lowest lattice dimension to consider (inclusive)
    upper_bound = 0  # upper bound on lattice dimension to consider (exclusive)
    step_size = 2    # increment lattice dimension in these steps
    trials = 1       # number of experiments to run per dimension
    workers = 1      # number of parallel experiments to run
    seed = int.from_bytes(os.urandom(8), 'big')  # randomness seed

    # The kernel reads the basis from a file, so serialise it first.
    with open(load_matrix, 'w') as f:
        f.write(str_mat(A))

    if goal_r0 is None:
        goal_r0 = find_norm(load_matrix, n, verbose=verbose, n_matches=3)
    if verbose:
        print(f"goal_r0 = {goal_r0}")

    params = SieverParams(load_matrix=load_matrix, threads=threads,
                          goal_r0=goal_r0, verbose=verbose)
    params['svp/alg'] = alg
    all_params = OrderedDict({f"'threads': {threads}, ": params})

    stats = run_all(svp_kernel, list(all_params.values()),
                    lower_bound=lower_bound,
                    upper_bound=upper_bound,
                    step_size=step_size,
                    trials=trials,
                    workers=workers,
                    seed=seed)

    inverse_all_params = OrderedDict([(v, k) for (k, v) in all_params.items()])
    stats = sanitize_params_names(stats, inverse_all_params)

    fmt = "{name:20s} :: n: {n:2d}, cputime {cputime:7.4f}s, walltime: {walltime:7.4f}s, "\
          "flast: {flast:3.2f}, |db|: 2^{avg_max:.2f}"
    print_stats(fmt, stats, ("cputime", "walltime", "flast", "avg_max"),
                extractf={"avg_max": lambda n, params, stat: db_stats(stat)[0]})

    res = list(stats.values())[0][0].data['res']

    if debug:
        __import__('IPython').embed()

    if keep_tmpfile:
        with open(load_matrix, 'w') as f:
            f.write(str(res))
    else:
        # fixed: was ``os.system(f'rm -f {load_matrix}')`` -- avoid the shell
        # entirely so filenames with spaces/metacharacters are safe.
        try:
            os.remove(load_matrix)
        except OSError:
            pass  # match ``rm -f`` semantics: ignore a missing file

    # Convert the matrix object into a plain list of lists for the caller.
    res = [[res[i, j] for j in range(res.ncols)] for i in range(res.nrows)]
    return res