def log(p, q):
    """Compute the discrete logarithm of q to base p (baby-step giant-step).

    Writes q = x * p and returns a (logarithm, steps) pair, where steps is
    the number of group additions performed.  Time and space are both
    O(sqrt(curve.n)).

    Raises AssertionError if no logarithm is found (cannot happen when p
    generates the whole group of order curve.n).
    """
    assert curve.is_on_curve(p)
    assert curve.is_on_curve(q)

    # math.isqrt avoids the float rounding of int(math.sqrt(n)): for
    # n >= 2**53 the float result can round down, making sqrt_n too small
    # and the baby/giant steps fail to cover every residue.
    sqrt_n = math.isqrt(curve.n) + 1

    # Compute the baby steps and store them in the 'precomputed' hash table.
    # NOTE(review): None appears to be the identity point here (curve.add is
    # fed r=None on the first iteration) — confirm against the curve module.
    r = None
    precomputed = {None: 0}

    for a in range(1, sqrt_n):
        r = curve.add(r, p)
        precomputed[r] = a

    # Now compute the giant steps and check the hash table for any
    # matching point.
    r = q
    s = curve.mult(sqrt_n, curve.neg(p))

    for b in range(sqrt_n):
        # Stored multipliers are non-negative ints, so None is a safe
        # "not found" sentinel; this replaces a try/except per iteration.
        a = precomputed.get(r)
        if a is not None:
            steps = sqrt_n + b
            logarithm = a + sqrt_n * b
            return logarithm, steps
        r = curve.add(r, s)

    raise AssertionError('logarithm not found')
def log(p, q):
    """Brute-force the discrete logarithm of q with respect to base p.

    The walk starts at a uniformly random multiple of p and adds p one
    step at a time until q is reached, so the expected step count is the
    same for every target.  Returns a (logarithm, steps) pair.

    Raises AssertionError if q is never reached within curve.n steps.
    """
    assert curve.is_on_curve(p)
    assert curve.is_on_curve(q)

    # Random starting offset; the walk visits every multiple of p once.
    offset = random.randrange(curve.n)
    current = curve.mult(offset, p)

    for taken in range(curve.n):
        if current == q:
            # Fold the random offset back into the answer modulo n.
            return (offset + taken) % curve.n, taken + 1
        current = curve.add(current, p)

    raise AssertionError('logarithm not found')
def log(p, q, counter=None):
    """Compute the discrete logarithm of q to base p using Pollard's Rho.

    Returns a (logarithm, steps) pair, where steps counts the tortoise
    iterations performed across all attempts.

    Raises AssertionError if every attempt fails.
    """
    assert curve.is_on_curve(p)
    assert curve.is_on_curve(q)

    # A collision with b1 == b2 would force a division by zero modulo n.
    # PollardRhoSequence draws random coefficients, so retrying with a
    # fresh sequence gives a new chance at the logarithm without changing
    # the asymptotic cost; give up after three attempts.
    for attempt in range(3):
        walk = PollardRhoSequence(p, q)
        slow = iter(walk)
        fast = iter(walk)

        # Bounded by curve.n iterations, but in practice the loop ends far
        # sooner — either at a useful collision or at a degenerate one.
        for step in range(curve.n):
            x1, a1, b1 = next(slow)
            # Floyd's cycle detection: the fast pointer moves two steps
            # for every one step of the slow pointer.
            next(fast)
            x2, a2, b2 = next(fast)

            if x1 != x2:
                continue
            if b1 == b2:
                # Degenerate collision: retry with a new random sequence.
                break
            result = (a1 - a2) * inverse_mod(b2 - b1, curve.n) % curve.n
            return result, attempt * curve.n + step + 1

    raise AssertionError('logarithm not found')