def __call__(cls, M, predicate, block_size, invalidate_cache=lambda: None, threads=1, max_loops=8, **kwds):
    """
    Run progressive BKZ-sieve tours on ``M`` and test basis vectors against ``predicate``.

    :param M: GSO object whose basis ``M.B`` is reduced in place
    :param predicate: called as ``predicate(v, standard_basis=True)`` on candidate rows
    :param block_size: final BKZ block size
    :param invalidate_cache: callback run after each tour mutates the basis
    :param threads: number of sieving threads (default: 1)
    :param max_loops: maximum number of BKZ tours at ``block_size`` (default: 8)
    :returns: ``USVPPredSolverResults``
    """
    params = SieverParams(threads=threads)
    g6k = Siever(M, params)
    tracer = SieveTreeTracer(g6k, root_label="bkz-sieve", start_clocks=True)

    # warm-up: progressive tours with block size growing 20, 30, … up to block_size
    for b in range(20, block_size + 1, 10):
        pump_n_jump_bkz_tour(g6k, tracer, b, pump_params={"down_sieve": True})

    auto_abort = BKZ.AutoAbort(M, M.d)

    found, ntests, solution = False, 0, None
    for tour in range(max_loops):
        pump_n_jump_bkz_tour(g6k, tracer, block_size, pump_params={"down_sieve": True})
        invalidate_cache()

        if auto_abort.test_abort():
            break

        with tracer.context("check"):
            # fix: the index from enumerate() was never used
            for v in M.B:
                ntests += 1
                if predicate(v, standard_basis=True):
                    solution = tuple([int(v_) for v_ in v])
                    found = True
                    break

        if found:
            break

    tracer.exit()
    b0, b0e = M.get_r_exp(0, 0)

    return USVPPredSolverResults(
        success=found,
        ntests=ntests,
        solution=solution,
        b0=b0**(0.5) * 2**(b0e / 2.0),
        cputime=tracer.trace.data["cputime"],
        walltime=tracer.trace.data["walltime"],
        data=tracer.trace,
    )
def find_norm(load_matrix, n, threads=4, **kwds):
    """
    find `goal_r0` for a given matrix

    :param load_matrix: file that keeps the matrix
    :param n: dimension of the matrix
    :param threads: ... (default: 4)
    :param workout__dim4free_dec: By how much do we decrease dim4free at each iteration (default: 2)
    :param verbose: ... (default: False)
    :param n_matches: max match trials for kernel (default: 5)
    """
    lower_bound = n  # lowest lattice dimension to consider (inclusive)
    upper_bound = 0  # upper bound on lattice dimension to consider (exclusive)
    step_size = 2    # increment lattice dimension in these steps
    trials = 1       # number of experiments to run per dimension
    workers = 1      # number of parallel experiments to run
    seed = int.from_bytes(os.urandom(8), 'big')  # randomness seed

    workout__dim4free_dec = kwds.get('workout__dim4free_dec', 2)
    verbose = kwds.get('verbose', False)
    n_matches = kwds.get('n_matches', 5)

    params = SieverParams(load_matrix=load_matrix, n_matches=n_matches,
                          threads=threads, verbose=verbose)
    params['workout/dim4free_dec'] = workout__dim4free_dec
    all_params = OrderedDict({f"'threads': {threads}, ": params})

    res = run_all(find_norm_kernel, list(all_params.values()),
                  lower_bound=lower_bound,
                  upper_bound=upper_bound,
                  step_size=step_size,
                  trials=trials,
                  workers=workers,
                  seed=seed)

    # run_all keys results by parameter set; we only submitted one
    goal_r0 = list(res.values())[0][0]
    return goal_r0
def solve_asvp(A, **kwds):
    """
    A G6K Approx-SVP Solver

    :param A: basis matrix (repr. in list)
    :param keep_tmpfile: keep the reduced matrix (default: False)
    :param threads: ... (default: 4)
    :param load_matrix: filename for temp matrix file
    :param verbose: ... (default: True)
    :param workout__dim4free_dec: By how much do we decrease dim4free at each iteration (default: 3)
    :param goal_r0__gh: ... Quit when this is reached (default: 1.05)
    """
    n = len(A)
    keep_tmpfile = kwds.get('keep_tmpfile', False)
    threads = kwds.get('threads', 4)
    load_matrix = kwds.get('load_matrix', f'svpchallenge-{n}.txt')
    verbose = kwds.get('verbose', True)
    workout__dim4free_dec = kwds.get('workout__dim4free_dec', 3)
    goal_r0__gh = kwds.get('goal_r0__gh', 1.05)

    params = SieverParams(threads=threads, load_matrix=load_matrix, verbose=verbose)
    params['workout/dim4free_dec'] = workout__dim4free_dec
    params['goal_r0__gh'] = goal_r0__gh

    # dump the basis so the siever can load it from disk
    with open(load_matrix, 'w') as f:
        f.write(str_mat(A))

    res = asvp(n, params, threads)

    if keep_tmpfile:
        with open(load_matrix, 'w') as f:
            f.write(str(res))
    else:
        # fix: use os.remove instead of `os.system(f'rm -f {...}')` —
        # portable and immune to shell metacharacters in the filename
        try:
            os.remove(load_matrix)
        except FileNotFoundError:
            pass

    res = [[res[i, j] for j in range(res.ncols)] for i in range(res.nrows)]
    return res
def test_full_sieve():
    """Smoke test: run the full-sieve kernel in dimension 50 with default siever parameters."""
    default_params = SieverParams()
    full_sieve_kernel(50, default_params, 1)
def __call__(cls, M, predicate, invalidate_cache=lambda: None, preproc_offset=20, threads=1, ph=0, **kwds):
    """
    Sieve for a short vector satisfying ``predicate``, optionally after BKZ-enum preprocessing.

    :param M: GSO object
    :param predicate: called as ``predicate(v, standard_basis=False)`` on sieve vectors
    :param invalidate_cache: callback run after the basis is mutated
    :param preproc_offset: preprocess with BKZ of block size ``M.d - preproc_offset`` (0 disables)
    :param threads: number of sieving threads (default: 1)
    :param ph: entries are divided by ``2**ph`` before sieving to shrink them
    :returns: ``USVPPredSolverResults``
    """
    # TODO bkz_sieve would be neater here
    if preproc_offset and M.d >= 40:
        bkz_res = usvp_pred_bkz_enum_solve(
            M,
            predicate,
            block_size=max(M.d - preproc_offset, 2),
            max_loops=8,
            threads=threads,
            invalidate_cache=invalidate_cache,
        )
        ntests = bkz_res.ntests
        if bkz_res.success:  # this might be enough
            return bkz_res
    else:
        bkz_res = None
        ntests = 0

    from fpylll import IntegerMatrix

    # reduce size of entries (floor division by 2**ph)
    B = IntegerMatrix(M.B.nrows, M.B.ncols)
    for i in range(M.B.nrows):
        for j in range(M.B.ncols):
            B[i, j] = M.B[i, j] // 2**ph

    params = SieverParams(reserved_n=M.d, otf_lift=False, threads=threads)
    g6k = Siever(B, params)
    tracer = SieveTreeTracer(g6k, root_label="sieve", start_clocks=True)
    g6k.initialize_local(0, M.d // 2, M.d)

    # extend the sieving context to the full lattice, sieving at each step
    while g6k.l:
        g6k.extend_left()
        with tracer.context("sieve"):
            try:
                g6k()
            except SaturationError:
                pass

    # fill the database
    with g6k.temp_params(**kwds):
        g6k()

    invalidate_cache()

    found, solution = False, None
    with tracer.context("check"):
        for v in g6k.itervalues():  # heuristic: v has very small entries
            ntests += 1
            if predicate(v, standard_basis=False):
                found = True
                solution = tuple([int(v_) for v_ in g6k.M.B.multiply_left(v)])
                break

    tracer.exit()
    # fix: parenthesize the conditional — previously `a + b if c else 0`
    # parsed as `(a + b) if c else 0`, discarding the sieve's own time
    # whenever no BKZ preprocessing ran
    cputime = tracer.trace.data["cputime"] + (bkz_res.cputime if bkz_res else 0)
    walltime = tracer.trace.data["walltime"] + (bkz_res.walltime if bkz_res else 0)
    b0, b0e = M.get_r_exp(0, 0)

    return USVPPredSolverResults(
        success=found,
        ntests=ntests,
        solution=solution,
        b0=b0**(0.5) * 2**(b0e / 2.0),
        cputime=cputime,
        walltime=walltime,
        data=tracer.trace,
    )
def test_svp_challenge():
    """Smoke test: run the ASVP kernel in dimension 50 on a seed-0 challenge instance."""
    challenge_params = SieverParams(load_matrix=None, challenge_seed=0, verbose=True)
    asvp_kernel(50, challenge_params, 1)
def __call__(cls, M, predicate, invalidate_cache=lambda: None, preproc_offset=20, threads=1, **kwds):
    """
    Run a sieving workout on ``M`` and test vectors against ``predicate``,
    optionally after BKZ-sieve preprocessing.

    :param M: GSO object
    :param predicate: called on basis rows (``standard_basis=True``) and on
        sieve database vectors (``standard_basis=False``)
    :param invalidate_cache: callback run after the basis is mutated
    :param preproc_offset: preprocess with BKZ of block size ``M.d - preproc_offset`` (0 disables)
    :param threads: number of sieving threads (default: 1)
    :returns: ``USVPPredSolverResults``
    """
    if preproc_offset and M.d >= 40:
        bkz_res = usvp_pred_bkz_sieve_solve(
            M,
            predicate,
            block_size=max(M.d - preproc_offset, 2),
            max_loops=8,
            threads=threads,
            invalidate_cache=invalidate_cache,
        )
        ntests = bkz_res.ntests
        if bkz_res.success:  # this might be enough
            return bkz_res
    else:
        bkz_res = None
        ntests = 0

    params = SieverParams(reserved_n=M.d, otf_lift=False, threads=threads)
    g6k = Siever(M, params)
    tracer = SieveTreeTracer(g6k, root_label="sieve", start_clocks=True)

    workout(g6k, tracer, 0, M.d, dim4free_min=0, dim4free_dec=15)
    invalidate_cache()

    found, solution = False, None
    with tracer.context("check"):
        # check if the workout solved it for us
        for i in range(g6k.M.d):
            ntests += 1
            if predicate(g6k.M.B[i], standard_basis=True):
                found = True
                solution = tuple([int(v_) for v_ in g6k.M.B[i]])
                break

    if found:
        tracer.exit()
        b0, b0e = M.get_r_exp(0, 0)
        return USVPPredSolverResults(
            success=found,
            ntests=ntests,
            solution=solution,
            b0=b0**(0.5) * 2**(b0e / 2.0),
            cputime=tracer.trace.data["cputime"],
            walltime=tracer.trace.data["walltime"],
            data=tracer.trace,
        )

    with tracer.context("sieve"):
        try:
            g6k()
        except SaturationError:
            pass

    # extend the sieving context to the full lattice, sieving at each step
    while g6k.l:
        g6k.extend_left()
        with tracer.context("sieve"):
            try:
                g6k()
            except SaturationError:
                pass

    # fill the database
    with g6k.temp_params(**kwds):
        g6k()

    invalidate_cache()

    with tracer.context("check"):
        # first the (now re-reduced) basis rows …
        for i in range(g6k.M.d):
            ntests += 1
            if predicate(g6k.M.B[i], standard_basis=True):
                found = True
                solution = tuple([int(v_) for v_ in g6k.M.B[i]])
                break
        # … then the sieve database, in sieve coordinates
        if not found:
            for v in g6k.itervalues():
                ntests += 1
                if predicate(v, standard_basis=False):
                    found = True
                    solution = tuple([int(v_) for v_ in g6k.M.B.multiply_left(v)])
                    break

    tracer.exit()
    # fix: parenthesize the conditional — previously `a + b if c else 0`
    # parsed as `(a + b) if c else 0`, discarding the sieve's own time
    # whenever no BKZ preprocessing ran
    cputime = tracer.trace.data["cputime"] + (bkz_res.cputime if bkz_res else 0)
    walltime = tracer.trace.data["walltime"] + (bkz_res.walltime if bkz_res else 0)
    b0, b0e = M.get_r_exp(0, 0)

    return USVPPredSolverResults(
        success=found,
        ntests=ntests,
        solution=solution,
        b0=b0**(0.5) * 2**(b0e / 2.0),
        cputime=cputime,
        walltime=walltime,
        data=tracer.trace,
    )
def solve_svp(A, goal_r0=None, threads=4, **kwds):
    """
    A G6K Exact-SVP Solver

    :param A: basis matrix (repr. in list)
    :param goal_r0: ... Quit when this is reached.
        If it's None, use the result of `find_norm` with `n_matches` = 3
        !!! may take long time running `find_norm` !!!
    :param threads: ... (default: 4)
    :param keep_tmpfile: keep the reduced matrix (default: False)
    :param load_matrix: filename for temp matrix file
    :param verbose: ... (default: True)
    :param alg: algorithm used to solve svp, chosen in 'enum', 'duc18', 'workout'(default)
    :param debug: ... (default: False)
    """
    n = len(A)
    keep_tmpfile = kwds.get('keep_tmpfile', False)
    load_matrix = kwds.get('load_matrix', f'svpchallenge-{n}.txt')
    verbose = kwds.get('verbose', True)
    alg = kwds.get('alg', 'workout')
    debug = kwds.get('debug', False)

    lower_bound = n  # lowest lattice dimension to consider (inclusive)
    upper_bound = 0  # upper bound on lattice dimension to consider (exclusive)
    step_size = 2    # increment lattice dimension in these steps
    trials = 1       # number of experiments to run per dimension
    workers = 1      # number of parallel experiments to run
    seed = int.from_bytes(os.urandom(8), 'big')  # randomness seed

    # dump the basis so the siever can load it from disk
    with open(load_matrix, 'w') as f:
        f.write(str_mat(A))

    if goal_r0 is None:
        goal_r0 = find_norm(load_matrix, n, verbose=verbose, n_matches=3)
    if verbose:
        print(f"goal_r0 = {goal_r0}")

    params = SieverParams(load_matrix=load_matrix, threads=threads,
                          goal_r0=goal_r0, verbose=verbose)
    params['svp/alg'] = alg
    all_params = OrderedDict({f"'threads': {threads}, ": params})

    stats = run_all(svp_kernel, list(all_params.values()),
                    lower_bound=lower_bound,
                    upper_bound=upper_bound,
                    step_size=step_size,
                    trials=trials,
                    workers=workers,
                    seed=seed)

    inverse_all_params = OrderedDict([(v, k) for (k, v) in all_params.items()])
    stats = sanitize_params_names(stats, inverse_all_params)

    fmt = "{name:20s} :: n: {n:2d}, cputime {cputime:7.4f}s, walltime: {walltime:7.4f}s, "\
          "flast: {flast:3.2f}, |db|: 2^{avg_max:.2f}"
    print_stats(fmt, stats, ("cputime", "walltime", "flast", "avg_max"),
                extractf={
                    "avg_max": lambda n, params, stat: db_stats(stat)[0]
                })

    res = list(stats.values())[0][0].data['res']
    if debug:
        __import__('IPython').embed()

    if keep_tmpfile:
        with open(load_matrix, 'w') as f:
            f.write(str(res))
    else:
        # fix: use os.remove instead of `os.system(f'rm -f {...}')` —
        # portable and immune to shell metacharacters in the filename
        try:
            os.remove(load_matrix)
        except FileNotFoundError:
            pass

    res = [[res[i, j] for j in range(res.ncols)] for i in range(res.nrows)]
    return res