def untwist(P):
    """Map a point on the twisted curve back to the untwisted curve.

    Builds the tower root w (the generator of the Fq12 extension) and
    divides the affine coordinates by w^2 and w^3 respectively.

    NOTE(review): reads the modulus from ``P.X`` (capital) but the
    coordinates from ``P.x``/``P.y`` — presumably Jacobian attributes
    vs. affine properties; confirm against the point class.
    """
    modulus = P.X.q
    w = Fq6(Fq2.zero(modulus), Fq2.one(modulus), Fq2.zero(modulus))
    nil = Fq6.zero(modulus)
    w_sq = Fq12(w, nil)   # w^2 factor
    w_cu = Fq12(nil, w)   # w^3 factor
    new_x = w_sq.inverse() * P.x
    new_y = w_cu.inverse() * P.y
    return EC.from_affine(new_x, new_y)
def twist(P):
    """Map a point on the untwisted curve onto the twisted curve.

    Multiplies the affine coordinates by the distortion factors w^2 and
    w^3 built from the Fq12 tower root, then projects the results down
    to their base Fq2 components (``.c0.c0``).
    """
    modulus = P.X.q
    w = Fq6(Fq2.zero(modulus), Fq2.one(modulus), Fq2.zero(modulus))
    nil = Fq6.zero(modulus)
    tx = Fq12(w, nil) * P.x   # w^2 * x
    ty = Fq12(nil, w) * P.y   # w^3 * y
    return TwistedEC.from_affine(tx.c0.c0, ty.c0.c0)
def aggregate_unit_sigs(signatures: List[Signature], players: List[int],
                        T: int, ec=default_ec) -> Signature:
    """Combine threshold unit signatures into one signature.

    Each signature is weighted by the Lagrange coefficient (evaluated
    at zero) of the player that produced it, then the weighted points
    are summed.  ``T`` is kept for interface compatibility.
    """
    coeffs = Threshold.lagrange_coeffs_at_zero(players, ec)
    # Start from the point at infinity.
    total = AffinePoint(Fq2.zero(ec.q), Fq2.zero(ec.q), True, ec).to_jacobian()
    for sig, coeff in zip(signatures, coeffs):
        total += sig.value * coeff
    return Signature.from_g2(total)
def aggregate(signatures):
    """
    Aggregate signatures by multiplying them together. This IS secure
    against rogue public key attacks, assuming these signatures were
    generated using sign_prepend.
    """
    modulus = default_ec.q
    # Accumulator starts at the point at infinity.
    combined = AffinePoint(Fq2.zero(modulus), Fq2.zero(modulus),
                           True, default_ec).to_jacobian()
    for signature in signatures:
        combined = combined + signature.value
    return PrependSignature.from_g2(combined)
def aggregate_sigs_simple(signatures):
    """
    Aggregate signatures by multiplying them together. This is NOT
    secure against rogue public key attacks, so do not use this for
    signatures on the same message.
    """
    modulus = default_ec.q
    # Accumulator starts at the point at infinity.
    combined = AffinePoint(Fq2.zero(modulus), Fq2.zero(modulus),
                           True, default_ec).to_jacobian()
    for signature in signatures:
        combined = combined + signature.value
    return Signature.from_g2(combined)
def aggregate_sigs_secure(signatures, public_keys, message_hashes):
    """
    Aggregate signatures using the secure method, which calculates
    exponents based on public keys, and raises each signature to an
    exponent before multiplying them together. This is secure against
    rogue public key attack, but is slower than simple aggregation.
    """
    # The three parallel lists must line up one-to-one.
    if (len(signatures) != len(public_keys) or
            len(public_keys) != len(message_hashes)):
        raise Exception("Invalid number of keys")
    mh_pub_sigs = [(message_hashes[i], public_keys[i], signatures[i])
                   for i in range(len(signatures))]
    # Sort by message hash + pk
    mh_pub_sigs.sort()
    # NOTE(review): exponents are derived from public_keys in the
    # caller's original order, while the signatures below are consumed
    # in sorted order — confirm that computed_Ts[i] is meant to pair
    # with the i-th *sorted* signature, and that verifiers derive the
    # exponents the same way.
    computed_Ts = BLS.hash_pks(len(public_keys), public_keys)
    # Raise each sig to a power of each t,
    # and multiply all together into agg_sig
    # NOTE(review): sibling aggregators read `.value.ec` / add
    # `sig.value`; here the pk's `.ec` and the tuple element are used
    # directly — confirm the expected element types of the inputs.
    ec = public_keys[0].ec
    # Jacobian point at infinity (Z == 0) is the group identity.
    agg_sig = JacobianPoint(Fq2.one(ec.q), Fq2.one(ec.q), Fq2.zero(ec.q),
                            True, ec)
    for i, (_, _, signature) in enumerate(mh_pub_sigs):
        agg_sig += signature * computed_Ts[i]
    return Signature.from_g2(agg_sig)
def divide_by(self, divisor_signatures):
    """
    Signature division (elliptic curve subtraction). This is useful
    if you have already verified parts of the tree, since verification
    of the resulting quotient signature will be faster (fewer pairings
    have to be performed).

    This function divides an aggregate signature by other signatures
    in the aggregate trees. A signature can only be divided if it is
    part of the subset, and all message/public key pairs in the
    aggregationInfo for the divisor signature are unique. i.e. you
    cannot divide s1 / s2, if s2 is an aggregate signature containing
    m1,pk1, which is also present somewhere else in s1's tree. Note,
    s2 itself does not have to be unique.
    """
    message_hashes_to_remove = []
    pubkeys_to_remove = []
    # Accumulates the (negated) divisor contributions; starts at the
    # Jacobian point at infinity (Z == 0).
    prod = JacobianPoint(Fq2.one(default_ec.q), Fq2.one(default_ec.q),
                         Fq2.zero(default_ec.q), True, default_ec)
    for divisor_sig in divisor_signatures:
        pks = divisor_sig.aggregation_info.public_keys
        message_hashes = divisor_sig.aggregation_info.message_hashes
        if len(pks) != len(message_hashes):
            raise Exception("Invalid aggregation info")
        for i in range(len(pks)):
            # Exponent recorded for this (message, pk) in the divisor's
            # aggregation tree.
            divisor = divisor_sig.aggregation_info.tree[
                (message_hashes[i], pks[i])]
            try:
                # The same pair must also exist in our own tree,
                # otherwise the divisor is not a subset of us.
                dividend = self.aggregation_info.tree[
                    (message_hashes[i], pks[i])]
            except KeyError:
                raise Exception("Signature is not a subset")
            if i == 0:
                quotient = (Fq(default_ec.n, dividend)
                            / Fq(default_ec.n, divisor))
            else:
                # Makes sure the quotient is identical for each public
                # key, which means message/pk pair is unique.
                new_quotient = (Fq(default_ec.n, dividend)
                                / Fq(default_ec.n, divisor))
                if quotient != new_quotient:
                    # NOTE(review): this concatenation is missing a
                    # space after the comma in the resulting message.
                    raise Exception("Cannot divide by aggregate signature," +
                                    "msg/pk pairs are not unique")
            message_hashes_to_remove.append(message_hashes[i])
            pubkeys_to_remove.append(pks[i])
        # One quotient per divisor signature (verified identical above);
        # negate it so the addition below subtracts the contribution.
        prod += (divisor_sig.value * -quotient)
    copy = Signature(deepcopy(self.value + prod),
                     deepcopy(self.aggregation_info))
    # Drop every divided-out (message, pk) entry from the copied tree.
    for i in range(len(message_hashes_to_remove)):
        a = message_hashes_to_remove[i]
        b = pubkeys_to_remove[i]
        if (a, b) in copy.aggregation_info.tree:
            del copy.aggregation_info.tree[(a, b)]
    # Rebuild the parallel (sorted) message/pk lists from the tree keys.
    sorted_keys = list(copy.aggregation_info.tree.keys())
    sorted_keys.sort()
    copy.aggregation_info.message_hashes = [t[0] for t in sorted_keys]
    copy.aggregation_info.public_keys = [t[1] for t in sorted_keys]
    return copy
def test_fields():
    """Exercise arithmetic in the Fq/Fq2/Fq6/Fq12 field tower."""
    # Basic Fq2 arithmetic over the toy prime 17.
    a = Fq(17, 30)
    b = Fq(17, -18)
    c = Fq2(17, a, b)
    d = Fq2(17, a + a, -5)
    e = c * d
    f = e * d
    assert f != e
    # Square-root round trip: sqrt(e^2)^2 == e^2.
    e_sq = e * e
    e_sqrt = e_sq.modsqrt()
    assert pow(e_sqrt, 2) == e_sq
    # Large modulus; an Fq value must not compare equal to an Fq2.
    a2 = Fq(
        172487123095712930573140951348,
        3012492130751239573498573249085723940848571098237509182375,
    )
    b2 = Fq(172487123095712930573140951348,
            3432984572394572309458723045723849)
    c2 = Fq2(172487123095712930573140951348, a2, b2)
    assert b2 != c2
    # Inversion round-trips in the Fq6/Fq12 tower.
    g = Fq6(17, c, d, d * d * c)
    h = Fq6(17, a + a * c, c * b * a, b * b * d * 21)
    i = Fq12(17, g, h)
    assert ~(~i) == i
    assert (~(i.root)) * i.root == Fq6.one(17)
    x = Fq12(17, Fq6.zero(17), i.root)
    assert (~x) * x == Fq12.one(17)
    # An Fq6 whose higher coefficients are zero compares equal to its
    # Fq2 constant term; a nonzero high coefficient breaks equality.
    j = Fq6(17, a + a * c, Fq2.zero(17), Fq2.zero(17))
    j2 = Fq6(17, a + a * c, Fq2.zero(17), Fq2.one(17))
    assert j == (a + a * c)
    assert j2 != (a + a * c)
    assert j != j2

    # Test frob_coeffs
    one = Fq(default_ec.q, 1)
    two = one + one
    a = Fq2(default_ec.q, two, two)
    b = Fq6(default_ec.q, a, a, a)
    c = Fq12(default_ec.q, b, b)
    for base in (a, b, c):
        for expo in range(1, base.extension):
            # qi_power (Frobenius via precomputed coefficients) must
            # agree with plain exponentiation by q^expo.
            assert base.qi_power(expo) == pow(base, pow(default_ec.q, expo))
def aggregate(signatures):
    """
    Aggregates many (aggregate) signatures, using a combination of
    simple and secure aggregation. Signatures are grouped based on
    which ones share common messages, and these are all merged
    securely.
    """
    public_keys = []  # List of lists
    message_hashes = []  # List of lists

    for signature in signatures:
        if signature.aggregation_info.empty():
            raise Exception(
                "Each signature must have a valid aggregation " + "info")
        public_keys.append(signature.aggregation_info.public_keys)
        message_hashes.append(signature.aggregation_info.message_hashes)

    # Find colliding vectors, save colliding messages
    messages_set = set()
    colliding_messages_set = set()

    for msg_vector in message_hashes:
        messages_set_local = set()
        for msg in msg_vector:
            # A message "collides" only when it appears in some *other*
            # group; duplicates within one vector don't count.
            if msg in messages_set and msg not in messages_set_local:
                colliding_messages_set.add(msg)
            messages_set.add(msg)
            messages_set_local.add(msg)

    if len(colliding_messages_set) == 0:
        # There are no colliding messages between the groups, so we
        # will just aggregate them all simply. Note that we assume
        # that every group is a valid aggregate signature. If an invalid
        # or insecure signature is given, an invalid signature will
        # be created. We don't verify for performance reasons.
        final_sig = Signature.aggregate_sigs_simple(signatures)
        aggregation_infos = [sig.aggregation_info for sig in signatures]
        final_agg_info = AggregationInfo.merge_infos(aggregation_infos)
        final_sig.set_aggregation_info(final_agg_info)
        return final_sig

    # There are groups that share messages, therefore we need
    # to use a secure form of aggregation. First we find which
    # groups collide, and securely aggregate these. Then, we
    # use simple aggregation at the end.
    colliding_sigs = []
    non_colliding_sigs = []
    colliding_message_hashes = []  # List of lists
    colliding_public_keys = []  # List of lists

    for i in range(len(signatures)):
        group_collides = False
        for msg in message_hashes[i]:
            if msg in colliding_messages_set:
                group_collides = True
                colliding_sigs.append(signatures[i])
                colliding_message_hashes.append(message_hashes[i])
                colliding_public_keys.append(public_keys[i])
                break
        if not group_collides:
            non_colliding_sigs.append(signatures[i])

    # Arrange all signatures, sorted by their aggregation info
    colliding_sigs.sort(key=lambda s: s.aggregation_info)

    # Arrange all public keys in sorted order, by (m, pk)
    sort_keys_sorted = []
    for i in range(len(colliding_public_keys)):
        for j in range(len(colliding_public_keys[i])):
            sort_keys_sorted.append((colliding_message_hashes[i][j],
                                     colliding_public_keys[i][j]))
    sort_keys_sorted.sort()
    sorted_public_keys = [pk for (mh, pk) in sort_keys_sorted]

    # Exponents are derived from the sorted public keys, so exponent i
    # pairs with the i-th signature in aggregation-info order.
    computed_Ts = BLS.hash_pks(len(colliding_sigs), sorted_public_keys)

    # Raise each sig to a power of each t,
    # and multiply all together into agg_sig
    ec = sorted_public_keys[0].value.ec
    # Jacobian point at infinity (Z == 0) is the group identity.
    agg_sig = JacobianPoint(Fq2.one(ec.q), Fq2.one(ec.q), Fq2.zero(ec.q),
                            True, ec)

    # Colliding groups are exponentiated (secure aggregation); the
    # non-colliding groups are then folded in with plain addition.
    for i, signature in enumerate(colliding_sigs):
        agg_sig += signature.value * computed_Ts[i]
    for signature in non_colliding_sigs:
        agg_sig += signature.value

    final_sig = Signature.from_g2(agg_sig)
    aggregation_infos = [sig.aggregation_info for sig in signatures]
    final_agg_info = AggregationInfo.merge_infos(aggregation_infos)
    final_sig.set_aggregation_info(final_agg_info)
    return final_sig
# using the "enhanced ZCash" format proposed in
# https://github.com/pairingwg/bls_standard/issues/16
# (C) 2019 Riad S. Wahby <*****@*****.**>
#
# see the comment at the top of ../sage-impl/serdes.sage for more information

import struct

from consts import p
from curve_ops import from_jacobian, point_eq
from fields import Fq, Fq2, sgn0, sqrt_F2

# Frequently used field constants over the base prime p.
F1_one = Fq.one(p)
F1_zero = Fq.zero(p)
F2_one = Fq2.one(p)
F2_zero = Fq2.zero(p)


class DeserError(Exception):
    """Raised when bytes cannot be deserialized into a curve point."""
    pass


class SerError(Exception):
    """Raised when a curve point cannot be serialized."""
    pass


def serialize(P, compressed=True):
    """Serialize the point P (a tuple of coordinates) to bytes.

    Dispatches on the coordinate field: Fq coordinates mean a G1 (E1)
    point, Fq2 coordinates a G2 (E2) point.

    :param P: point as a tuple of field-element coordinates
    :param compressed: whether to use the compressed encoding
    :raises SerError: if the coordinate type is neither Fq nor Fq2
    """
    if isinstance(P[0], Fq):
        return _serialize_ell1(P, compressed)
    if isinstance(P[0], Fq2):
        return _serialize_ell2(P, compressed)
    # Fix: previously fell through and returned None silently for an
    # unsupported coordinate type; fail loudly instead.
    raise SerError("cannot serialize " + str(P))
    # NOTE(review): the enclosing `def` header (an _on_curve-style check
    # presumably taking an unpacked Jacobian point (x, y, z) and the
    # curve constant b) is truncated from this view; the lines below are
    # its body — confirm against the full file.
    ySq = y**2
    xSq = x**2
    xCu = x * xSq
    z2 = z**2
    z4 = z2**2
    z6 = z4 * z2
    # Point at infinity is encoded as x == 0, z == 0 with y nonzero.
    infty = x == 0 and y != 0 and z == 0
    # Curve equation in Jacobian coordinates: y^2 == x^3 + b * z^6.
    match = ySq == xCu + b * z6
    return infty or match


# Curve checks with the concrete constants: b = 4 for G1, b = 4 + 4*i
# for G2.
on_curve_g1 = lambda P: _on_curve(P, Fq(p, 4))
on_curve_g2 = lambda P: _on_curve(P, Fq2(p, 4, 4))


def _subgroup_check(P, on_curve_fn, id_pt):
    """Return True iff P is on the curve and q_chain maps it to id_pt."""
    if not on_curve_fn(P):
        return False
    # NOTE(review): q_chain is defined elsewhere — presumably the fast
    # endomorphism-based subgroup test; confirm in curve_ops.
    Q = q_chain(P)
    return point_eq(Q, id_pt)


# Identity points (Jacobian: Z == 0) and the public subgroup checks.
id_g1 = (Fq.zero(p), Fq.one(p), Fq.zero(p))
subgroup_check_g1 = lambda P: _subgroup_check(P, on_curve_g1, id_g1)
id_g2 = (Fq2.zero(p), Fq2.one(p), Fq2.zero(p))
subgroup_check_g2 = lambda P: _subgroup_check(P, on_curve_g2, id_g2)