def enum(self, M, k, target_prob, preproc_time):
    b = self.b
    r = [M.get_r(i, i) for i in range(k, k + b)]
    radius = r[0] * .99
    gh_radius = gaussian_heuristic(r)
    # For larger blocks, never enumerate far beyond the Gaussian heuristic.
    if b > 30:
        radius = min(radius, 1.1 * gh_radius)
    # Small blocks: fall back to the precomputed strategy.
    if b < YOLO_PRUNER_MIN_BLOCK_SIZE:
        return radius, self.strategy.get_pruning(radius, gh_radius)
    R = tuple(r)
    overhead = (preproc_time + RESTART_PENALTY) * NODE_PER_SEC
    start_from = self.last_prunings
    pruning = prune(radius, overhead, target_prob, [R],
                    descent_method="gradient", precision=53,
                    start_from=start_from)
    self.last_prunings = pruning.coefficients
    # Exponential moving average of the pruner's success estimate.
    self.proba = (self.proba * YOLO_MEMORY_LENGTH) + pruning.expectation
    self.proba /= YOLO_MEMORY_LENGTH + 1
    return radius, pruning

def enum(self, M, k, target_prob, preproc_time):
    # Variant of `enum` for the older gaussian_heuristic API, which takes
    # (radius, expo, dim, root_det, factor) and returns a (radius, expo) pair.
    b = self.b
    radius = M.get_r(k, k) * .99
    root_det = M.get_root_det(k, k + b - 1)
    gh_radius, ge = gaussian_heuristic(radius, 0, b, root_det, 1.)
    if b > 30:
        radius = min(radius, 1.21 * gh_radius * 2**ge)
    if b < YOLO_PRUNER_MIN_BLOCK_SIZE:
        return radius, self.strategy.get_pruning(radius, gh_radius * 2**ge)
    R = tuple([M.get_r(i, i) for i in range(k, k + b)])
    overhead = (preproc_time + RESTART_PENALTY) * NODE_PER_SEC
    start_from = self.last_prunings
    pruning = prune(radius, overhead, target_prob, [R],
                    descent_method="gradient", precision=53,
                    start_from=start_from)
    self.last_prunings = pruning.coefficients
    # Exponential moving average of the success probability estimate.
    self.proba = (self.proba * YOLO_MEMORY_LENGTH) + pruning.probability
    self.proba /= YOLO_MEMORY_LENGTH + 1
    return radius, pruning
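
# Usage sketch (hypothetical wiring, not from this module): both `enum`
# variants above are meant to be called from a tour, with `M` the GSO
# object, `k` the start index of the block, and `preproc_time` the measured
# preprocessing cost used to amortize the pruner's overhead term:
#
#     radius, pruning = tuner.enum(M, k, target_prob=0.5, preproc_time=0.1)
#     enum_obj = Enumeration(M)
#     solution, _ = enum_obj.enumerate(k, k + tuner.b, radius, 0,
#                                      pruning=pruning.coefficients)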

def decide_enumeration(self, kappa, block_size, param, stats=None,
                       preproc_time=0.1, target_probability=.5):
    radius = self.M.get_r(kappa, kappa)
    root_det = self.M.get_root_det(kappa, kappa + block_size)
    gh_radius, ge = gaussian_heuristic(radius, 0, block_size, root_det, 1.0)
    if block_size < AUTO_MIN_BLOCK_SIZE:
        # Small blocks: reuse the pruning parameters from the precomputed
        # strategy instead of running the pruner.
        strategy = param.strategies[block_size]
        return radius, strategy.get_pruning(radius, gh_radius * 2**ge)
    else:
        with stats.context("pruner"):
            R = [self.M.get_r(i, i) for i in range(kappa, kappa + block_size)]
            overhead = preproc_time * AUTO_NODE_PER_SEC
            # Warm-start the descent from the coefficients found for this
            # block size last time.
            start_from = self.last_pruning[block_size]
            pruning = prune(radius, overhead, target_probability, [R],
                            descent_method="gradient", precision=53,
                            start_from=start_from)
            self.last_pruning[block_size] = pruning.coefficients
        return radius, pruning
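
# `self.last_pruning` is indexed by block size, so it must be initialized
# before the first call; a minimal sketch, assuming an upper bound
# `max_block_size` (hypothetical name):
#
#     self.last_pruning = (max_block_size + 1) * [None]
#
# `prune(..., start_from=None)` then starts the descent from scratch the
# first time each block size is used.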

def test_pruner_vec(n=20, m=20):
    M = prepare(n, m)
    if have_numpy:
        vec = []
        for mat in M:  # renamed from `m`, which shadowed the parameter
            vec.append(tuple(dump_r(mat, 0, n)))
        radius = sum([mat.get_r(0, 0) for mat in M]) / len(M)
        pruning = prune(radius, 0, 0.9, vec)
        assert pruning.probability >= 0.89

def get_pruning(self, M, kappa, target_prob, preproc_time):
    block_size = self.block_size
    r = tuple([M.get_r(i, i) for i in range(kappa, kappa + block_size)])
    radius = r[0] * .99
    gh_radius = gaussian_heuristic(r)
    # Cap the radius at 1.21 * GH once the block is large enough for the
    # Gaussian heuristic to be reliable.
    if block_size > YOLO_GHBOUND_MIN_BLOCK_SIZE:
        radius = min(radius, 1.21 * gh_radius)
    if block_size < YOLO_PRUNER_MIN_BLOCK_SIZE:
        return radius, self.strategy.get_pruning(radius, gh_radius)
    overhead = (preproc_time + RETRY_PENALTY) * NODE_PER_SEC
    self.last_pruning = prune(radius, overhead, target_prob, [r],
                              descent_method="gradient",
                              metric="probability", float_type="double",
                              pruning=self.last_pruning)
    return radius, self.last_pruning
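
# The `overhead` argument converts fixed per-retry costs (preprocessing
# plus retry bookkeeping) into an equivalent node count, so the pruner can
# balance enumeration cost against success probability. A sketch of the
# quantities involved, with hypothetical constants (the real values live
# elsewhere in this module):
#
#     NODE_PER_SEC = 2.0**26   # assumed enumeration throughput
#     RETRY_PENALTY = 0.01     # assumed fixed cost per retry, in seconds
#     overhead = (preproc_time + RETRY_PENALTY) * NODE_PER_SEC
#     # expected total cost ~ (enum_nodes + overhead) / success_probability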

def yolo_hsvp(n, A, gh_factor, core=0):
    timer = Timer()
    ybkz = YoloBKZ(A, tuners=tuners)
    start_from = None
    start_from_rec = None
    first_len = ybkz.M.get_r(0, 0)
    root_det = ybkz.M.get_root_det(0, n)
    gh_radius, ge = gaussian_heuristic(first_len, 0, n, root_det, 1.)
    gh_radius = abs(gh_radius * 2**ge)
    radius = gh_factor * gh_radius
    # Expected fraction of the target ball's lattice points found per trial:
    # scaling the squared radius by gh_factor scales volumes by
    # gh_factor**(n/2).
    target_prob = (1. / gh_factor)**(n / 2.)
    trial = 0
    count = 0
    restarted = 0
    ybkz.randomize(0, n, density=1)

    while True:
        timer.reset()
        max_efficiency = 0.
        # Progressive warm-up with cheap small-block tours.
        for b in range(8, n // 2, 4):
            ybkz.tour(b, target_prob=.50)
        restarted += 1
        for b in range(n // 2, n - 10, 2):
            count += 1
            ybkz.tour(b, target_prob=.10)
            overhead = NODE_PER_SEC * timer.elapsed()
            R = tuple([ybkz.M.get_r(i, i) for i in range(0, n)])
            title = "c=%d r=%d b=%d t=%.1fs" % (core, restarted, b,
                                                timer.elapsed())
            print(title)
            # First a cheap approximation of the pruning coefficients ...
            pruning = prune(radius, overhead, target_prob, [R],
                            descent_method="hybrid", precision=53,
                            start_from=start_from)
            start_from = pruning.coefficients
            print("c=%d pruning approximated t=%.1fs" % (core, timer.elapsed()))
            # ... then refine them with a high-precision gradient descent.
            pruning = prune(radius, overhead, target_prob, [R],
                            descent_method="gradient",
                            precision=YOLO_PRUNER_PREC,
                            start_from=start_from)
            title = "c=%d r=%d b=%d t=%.1fs p=%1.2e e=%.1fs" % (
                core, restarted, b, timer.elapsed(),
                pruning.probability / target_prob,
                (target_prob * timer.elapsed()) / pruning.probability)
            print(title)
            plot_and_save([log(x / gh_radius) / log(2.) for x in R], title,
                          '%d/c%ds%d.png' % (n, core, count))
            start_from = pruning.coefficients

            try:
                enum_obj = Enumeration(ybkz.M)
                solution, _ = enum_obj.enumerate(0, n, radius, 0,
                                                 pruning=pruning.coefficients)
                ybkz.insert(0, n, solution)
                print()
                print(list(A[0]))
                return
            except EnumerationError:
                print("c=%d Enum failed t=%.1fs" % (core, timer.elapsed()))

            efficiency = pruning.probability / timer.elapsed()

            # RECYCLING: retry enumeration on the much cheaper tail block
            # [r_start, n), inserting any auxiliary solutions found.
            r_start = count % 10
            recycling_radius = ybkz.M.get_r(r_start, r_start) * .99
            pruning = prune(recycling_radius, overhead, target_prob,
                            [R[r_start:]], descent_method="hybrid",
                            precision=53)
            title = "REC c=%d r=%d b=%d t=%.1fs p=%1.2e e=%.1fs" % (
                core, restarted, b, timer.elapsed(),
                pruning.probability / target_prob,
                (target_prob * timer.elapsed()) / pruning.probability)
            print(title)
            try:
                hints = []
                enum_obj = Enumeration(ybkz.M, n // 2)
                solution, _ = enum_obj.enumerate(r_start, n, recycling_radius,
                                                 r_start,
                                                 pruning=pruning.coefficients,
                                                 aux_sols=hints)
                hints = [sol for (sol, _) in hints[1:]]
                ybkz.insert(r_start, n, solution, hints=hints)
                print("c=%d Recycled %d t=%.1fs" % (core, len(hints) + 1,
                                                    timer.elapsed()))
                break
            except EnumerationError:
                pass
            start_from_rec = pruning.coefficients
            # END OF RECYCLING

            # Restart with a fresh randomization once the success rate per
            # second drops well below the best seen so far.
            if 2 * efficiency < max_efficiency:
                ybkz.randomize(0, n, density=1)
                ybkz.lll_obj(0, 0, n)
                break
            max_efficiency = max(efficiency, max_efficiency)
        timer.reset()
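
# Why target_prob = (1. / gh_factor)**(n / 2.) in yolo_hsvp: radii here are
# squared norms, so a ball of squared radius gh_factor * GH holds roughly
# gh_factor**(n/2) lattice points where the GH ball holds ~1. Aiming for
# the single shortest vector, the sensible per-trial success target is the
# inverse of that count. Pure arithmetic example:
#
#     n, gh_factor = 60, 1.1
#     print((1. / gh_factor)**(n / 2.))   # ~0.057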

def test_pruner_gso(n=20, m=20):
    M = prepare(n, m)
    # Average the first squared GSO norm over the m sampled bases.
    radius = sum([mat.get_r(0, 0) for mat in M]) / len(M)
    pruning = prune(radius, 0, 0.9, M)
    assert pruning.probability >= 0.89
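
# The two tests above exercise both input conventions of `prune`: GSO
# objects directly (test_pruner_gso) and plain tuples of squared
# Gram-Schmidt norms (test_pruner_vec); averaging over several bases tunes
# the coefficients for a "typical" profile rather than one basis. A minimal
# sketch of the tuple form, on a toy profile:
#
#     r = tuple(1.05**(-i) for i in range(30))  # toy squared-norm profile
#     pruning = prune(r[0], 0, 0.9, [r])        # radius r[0], zero overhead
#     assert pruning.probability >= 0.89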

def test_pruner():
    # A dummy prune to load tabulated values
    prune(5, 50, .5, 10 * [1.])

    for (n, overhead) in dim_oh:
        print(" \n ~~~~ Dim %d \n" % n)
        M = prepare(n)
        r = [M.get_r(i, i) for i in range(n)]

        print(" \n GREEDY")
        radius = gaussian_heuristic(r) * 1.6
        print("pre-greedy radius %.4e" % radius)
        tt = clock()
        (radius, pruning) = prune(radius, overhead, 200, r,
                                  descent_method="greedy", metric="solutions")
        print("Time %.4e" % (clock() - tt))
        print("post-greedy radius %.4e" % radius)
        print(pruning)
        print("cost %.4e" % sum(pruning.detailed_cost))
        solutions = Enumeration(M, nr_solutions=10000).enumerate(
            0, n, radius, 0, pruning=pruning.coefficients)
        print(len(solutions))
        assert len(solutions) / pruning.expectation < 2
        assert len(solutions) / pruning.expectation > .2

        # Run greedy a second time, starting from the post-greedy radius.
        print(" \n GREEDY \n")
        print("pre-greedy radius %.4e" % radius)
        tt = clock()
        (radius, pruning) = prune(radius, overhead, 200, r,
                                  descent_method="greedy", metric="solutions")
        print("Time %.4e" % (clock() - tt))
        print("post-greedy radius %.4e" % radius)
        print(pruning)
        print("cost %.4e" % sum(pruning.detailed_cost))
        solutions = Enumeration(M, nr_solutions=10000).enumerate(
            0, n, radius, 0, pruning=pruning.coefficients)
        print(len(solutions))
        assert len(solutions) / pruning.expectation < 2
        assert len(solutions) / pruning.expectation > .2

        print(" \n GRADIENT \n")
        print("radius %.4e" % radius)
        tt = clock()
        pruning = prune(radius, overhead, 200, r,
                        descent_method="gradient", metric="solutions")
        print("Time %.4e" % (clock() - tt))
        print(pruning)
        print("cost %.4e" % sum(pruning.detailed_cost))
        solutions = Enumeration(M, nr_solutions=10000).enumerate(
            0, n, radius, 0, pruning=pruning.coefficients)
        print(len(solutions))
        assert len(solutions) / pruning.expectation < 2
        assert len(solutions) / pruning.expectation > .2

        print(" \n HYBRID \n")
        print("radius %.4e" % radius)
        tt = clock()
        pruning = prune(radius, overhead, 200, r,
                        descent_method="hybrid", metric="solutions")
        print("Time %.4e" % (clock() - tt))
        print(pruning)
        print("cost %.4e" % sum(pruning.detailed_cost))
        solutions = Enumeration(M, nr_solutions=10000).enumerate(
            0, n, radius, 0, pruning=pruning.coefficients)
        print(len(solutions))
        assert len(solutions) / pruning.expectation < 2
        assert len(solutions) / pruning.expectation > .2
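
# Hypothetical fixtures assumed by the tests above (the real definitions
# live elsewhere in this repo): `prepare` returns LLL-reduced GSO object(s),
# and `dim_oh` pairs dimensions with enumeration overheads. A minimal
# sketch of what they might look like, using only standard fpylll calls:
#
#     from fpylll import IntegerMatrix, GSO, LLL
#
#     def prepare(n):
#         A = IntegerMatrix.random(n, "qary", bits=30, k=n // 2)
#         M = GSO.Mat(A)
#         LLL.Reduction(M)()
#         return M
#
#     dim_oh = [(60, 2**25), (70, 2**30)]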

def recycled_svp_reduction(self, kappa, block_size, param, stats):
    """
    :param kappa: start index of the block to reduce
    :param block_size: dimension of the SVP block
    :param param: BKZ parameters
    :param stats: statistics object (a DummyStats is used if None)
    """
    if stats is None:
        stats = DummyStats(self)

    self.M.update_gso()
    self.lll_obj.size_reduction(0, kappa + 1)
    self.lll_obj(kappa, kappa, kappa + block_size)

    old_first, old_first_expo = self.M.get_r_exp(kappa, kappa)
    remaining_probability, rerandomize = 1.0, False

    print(" - ", end="")
    preproc_block_size = PREPROC_BLOCK_SIZE_INIT
    while remaining_probability > 1. - param.min_success_probability:
        preproc_block_size += PREPROC_BLOCK_SIZE_INCR
        start_preproc = time()
        with stats.context("preproc"):
            rec_clean = self.recycled_svp_preprocessing(
                kappa, block_size, param, stats, preproc_block_size)
        time_preproc = time() - start_preproc

        radius, expo = self.M.get_r_exp(kappa, kappa)
        if param.flags & BKZ.GH_BND:
            root_det = self.M.get_root_det(kappa, kappa + block_size)
            radius, expo = gaussian_heuristic(radius, expo, block_size,
                                              root_det, param.gh_factor)

        overhead = NODE_PER_SEC * time_preproc
        with stats.context("postproc"):
            self.M.update_gso()
        R = dump_r(self.M, kappa, block_size)
        # print(R)

        # Aim the per-round success probability so that, chained over the
        # rounds, 1 - remaining_probability * (1 - goal_proba) reaches
        # min_success_probability (with 1% slack).
        goal_proba = 1.01 * ((param.min_success_probability - 1)
                             / remaining_probability + 1)
        pruning = prune(radius * 2**expo, overhead, goal_proba, [R],
                        descent_method="gradient", precision=53)
        print(goal_proba, pruning.probability)

        try:
            enum_obj = Enumeration(self.M, self.recycling_pool_max_size)
            aux_sols = []
            with stats.context("svp", E=enum_obj):
                # Slightly enlarge the radius but tighten the pruning
                # coefficients on the preprocessed head of the block.
                K = [x for x in pruning.coefficients]
                radius *= 1.05
                for i in range(5, preproc_block_size):
                    K[i] /= 1.05
                solution, max_dist = enum_obj.enumerate(
                    kappa, kappa + block_size, radius, expo,
                    pruning=K, aux_sols=aux_sols)
            V = [v for (v, _) in aux_sols[:10]]
            self.multi_insert(V, kappa, block_size, stats)
        except EnumerationError:
            print(0, end=" ")

        remaining_probability *= (1 - pruning.probability)

    self.lll_obj.size_reduction(0, kappa + 1)
    new_first, new_first_expo = self.M.get_r_exp(kappa, kappa)
    clean = old_first <= new_first * 2**(new_first_expo - old_first_expo)
    return clean


# def to_cannonical(A, v, kappa, block_size):
#     v = kappa*[0] + [x for x in v] + (A.nrows - (kappa + block_size)) * [0]
#     v = IntegerMatrix.from_iterable(1, A.nrows,
#                                     map(lambda x: int(round(x)), v))
#     v = tuple((v*A)[0])
#     return v


# def multi_insert_from_cannonical(M, V, kappa, block_size):
#     d = M.d
#     s = d
#     l = len(V)
#     for v in V:
#         w = M.babai(v)
#         for i in range(kappa + block_size, d):
#             assert w[i] == 0
#         M.create_row()
#         with self.M.row_ops(s, s+1):
#             for i in range(kappa + block_size):
#                 self.M.row_addmul(s, i, w[i])
#         s += 1
#     for i in reversed(range(l)):
#         self.M.move_row(kappa, d+i)
#     with stats.context("lll"):
#         self.lll_obj(kappa, kappa, kappa + block_size + 1)
#     for i in range(l):
#         self.M.move_row(kappa + block_size + i, s)
#     for i in range(l):
#         self.M.remove_last_row()
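
# Arithmetic sanity sketch for the goal_proba formula in
# recycled_svp_reduction (pure Python, no fpylll needed): one round at the
# computed goal already pushes overall success past min_success_probability.
#
#     min_success_probability = 0.5
#     remaining_probability = 1.0
#     goal_proba = 1.01 * ((min_success_probability - 1)
#                          / remaining_probability + 1)
#     remaining_probability *= (1 - goal_proba)
#     assert 1 - remaining_probability >= min_success_probability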