def check_proof_multi(commitment, proof, x, ys, setup):
    """
    Check a proof for a Kate commitment for an evaluation f(x w^i) = y_i
    """
    n = len(ys)
    root_of_unity = get_root_of_unity(n)

    # Interpolate at a coset. Note because it is a coset, not the subgroup, we have to multiply the
    # polynomial coefficients by x^i
    interpolation_polynomial = fft(ys, MODULUS, root_of_unity, True)
    interpolation_polynomial = [div(c, pow(x, i, MODULUS)) for i, c in enumerate(interpolation_polynomial)]

    # Verify the pairing equation
    #
    # e([commitment - interpolation_polynomial(s)], [1]) = e([proof], [s^n - x^n])
    #    equivalent to
    # e([commitment - interpolation_polynomial]^(-1), [1]) * e([proof], [s^n - x^n]) = 1_T
    #
    xn_minus_yn = b.add(setup[1][n], b.multiply(b.neg(b.G2), pow(x, n, MODULUS)))
    commitment_minus_interpolation = b.add(commitment, b.neg(lincomb(
        setup[0][:len(interpolation_polynomial)], interpolation_polynomial, b.add, b.Z1)))
    pairing_check = b.pairing(b.G2, b.neg(commitment_minus_interpolation), False)
    pairing_check *= b.pairing(xn_minus_yn, proof, False)
    pairing = b.final_exponentiate(pairing_check)
    return pairing == b.FQ12.one()
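# Note on the division by x^i above (a worked restatement, not code from the
# source): if q(X) interpolates the ys over the subgroup, i.e. q(w^i) = y_i,
# then p(X) := q(X / x) has coefficients c_i / x^i and satisfies
#     p(x * w^i) = q(w^i) = y_i
# which is exactly the coset-evaluation claim the pairing equation checks.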
def verify(points, commitment, proof):
    # Crop the base points to just what we need
    points = points[:2**len(proof.L)]
    # Fiat-shamir randomness value
    r = hash(serialize_point(commitment))
    # For verification, we need to generate the same random linear combination of
    # base points that the prover did. But because we don't need to use it until
    # the end, we do it more efficiently here: when we progress through the rounds,
    # we keep track of how many times each points[i] will appear in the final
    # result...
    points_coeffs = [1]
    # log(n) rounds, just like the prover...
    for i in range(len(proof.L)):
        r = hash(r + serialize_point(proof.L[i]) + serialize_point(proof.R[i]))
        # Generate random coefficient for recombining (same as the prover)
        a = int.from_bytes(r, 'little') % b.curve_order
        # print('a value: ', a)
        # Add L and R into the commitment, applying the appropriate coefficients
        commitment = b.add(
            proof.L[i],
            b.add(b.multiply(commitment, a), b.multiply(proof.R[i], a**2)))
        # print('intermediate commitment:', commitment)
        # Update the coefficients (points_coeffs[i] = how many times points[i] will
        # appear in the single base point of the last round)
        points_coeffs = sum([[(x * a) % b.curve_order, x] for x in points_coeffs], [])
    # Finally, we do the linear combination
    combined_point = lincomb(points, points_coeffs, b.add, b.Z1)
    # Base case check: base_point * coefficient ?= commitment
    return b.eq(b.multiply(combined_point, proof.tip), commitment)
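# `lincomb` is assumed by several snippets here but not defined in them. A
# minimal, naive, term-by-term sketch consistent with how it is called; a real
# implementation would use a faster multi-scalar multiplication (e.g. Pippenger):
def lincomb(points, scalars, add_fn, zero):
    result = zero
    for point, scalar in zip(points, scalars):
        result = add_fn(result, b.multiply(point, scalar))
    return result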
def checker(data, msg):
    # Aggregate the public keys and signatures of every entry that voted `msg`,
    # then verify the aggregate signature in a single check
    agg_pk = Z1
    agg_sig = Z2
    for i in range(len(data)):
        if data[i]["Vote"].encode() == msg:
            agg_pk = add(agg_pk, pubkey_to_G1(bytes.fromhex(data[i]["PK"])))
            agg_sig = add(agg_sig, signature_to_G2(bytes.fromhex(data[i]["Sign"])))
    isok = singlechecker(agg_pk, agg_sig, msg)
    print(isok)
def prove_evaluation(points, commitment, poly, x, y):
    assert is_power_of_two(len(poly))
    # Crop the base points to just what we need. We add an additional base point,
    # which we will use to mix in the _evaluation_ of the polynomial.
    points, H = points[:len(poly)], points[len(poly)]
    # Alongside the base points, we track the powers of the x coordinate we are
    # proving an evaluation for. These points get manipulated in the same way as the
    # base points do.
    xpowers = [pow(x, i, b.curve_order) for i in range(len(poly))]
    # Left-side points for the proof
    L = []
    # Right-side points for the proof
    R = []
    # Fiat-shamir randomness value
    r = hash(
        serialize_point(commitment) + x.to_bytes(32, 'little') +
        y.to_bytes(32, 'little'))
    # For security, we randomize H
    H = b.multiply(H, int.from_bytes(r, 'little') % b.curve_order)
    while len(poly) > 1:
        # Generate the left-side and right-side points, except we also mix in a similarly
        # constructed "commitment" that uses `H * powers of x` as its base instead of the
        # base points.
        polyL, polyR = left_half(poly), right_half(poly)
        pointsL, pointsR = left_half(points), right_half(points)
        xpowersL, xpowersR = left_half(xpowers), right_half(xpowers)
        yL = commit(pointsR, polyL)
        yR = commit(pointsL, polyR)
        L.append(
            b.add(yL, b.multiply(H, sum(a * b for a, b in zip(xpowersR, polyL)))))
        R.append(
            b.add(yR, b.multiply(H, sum(a * b for a, b in zip(xpowersL, polyR)))))
        # Generate random coefficient for recombining the L and R and commitment
        r = hash(r + serialize_point(L[-1]) + serialize_point(R[-1]))
        a = int.from_bytes(r, 'little') % b.curve_order
        # print('a value: ', a)
        # Generate half-size polynomial and points for the next round. Notice how we treat
        # the powers of x the same way that we do the base points
        poly = [(cL + cR * a) % b.curve_order for (cL, cR) in zip(polyL, polyR)]
        points = [
            b.add(b.multiply(pL, a), pR)
            for (pL, pR) in zip(pointsL, pointsR)
        ]
        xpowers = [(xL * a + xR) % b.curve_order
                   for (xL, xR) in zip(xpowersL, xpowersR)]
        # print('intermediate commitment:', b.add(commit(points, poly), b.multiply(H, sum(a*b for a,b in zip(xpowers, poly)))))
    return Proof(L, R, poly[0])
def execute(cls, stack: MichelsonStack, stdout: List[str], context: AbstractContext):
    a, b = cast(
        Tuple[Union[IntType, NatType, MutezType, TimestampType,
                    BLS12_381_G1Type, BLS12_381_G2Type, BLS12_381_FrType], ...],
        stack.pop2())
    res_type, = dispatch_types(
        type(a),
        type(b),
        mapping={
            (NatType, NatType): (NatType,),
            (NatType, IntType): (IntType,),
            (IntType, NatType): (IntType,),
            (IntType, IntType): (IntType,),
            (TimestampType, IntType): (TimestampType,),
            (IntType, TimestampType): (TimestampType,),
            (MutezType, MutezType): (MutezType,),
            (BLS12_381_FrType, BLS12_381_FrType): (BLS12_381_FrType,),
            (BLS12_381_G1Type, BLS12_381_G1Type): (BLS12_381_G1Type,),
            (BLS12_381_G2Type, BLS12_381_G2Type): (BLS12_381_G2Type,)
        })
    res_type = cast(
        Union[Type[IntType], Type[NatType], Type[TimestampType], Type[MutezType],
              Type[BLS12_381_G1Type], Type[BLS12_381_G2Type], Type[BLS12_381_FrType]],
        res_type)
    if issubclass(res_type, IntType):
        res = res_type.from_value(int(a) + int(b))  # type: ignore
    else:
        res = res_type.from_point(bls12_381.add(a.to_point(), b.to_point()))  # type: ignore
    stack.push(res)
    stdout.append(format_stdout(cls.prim, [a, b], [res]))  # type: ignore
    return cls(stack_items_added=1)
def _AggregatePKs(PKs: Sequence[BLSPubkey]) -> BLSPubkey:
    assert len(PKs) >= 1, 'Insufficient number of PKs. (n < 1)'
    aggregate = Z1  # Seed with the point at infinity
    for pk in PKs:
        pubkey_point = pubkey_to_G1(pk)
        aggregate = add(aggregate, pubkey_point)
    return G1_to_pubkey(aggregate)
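# Usage sketch for the aggregation above (assumptions: py_ecc's optimized
# BLS12-381 backend and its G1 compression helpers; a pubkey is just G1 * sk):
from py_ecc.optimized_bls12_381 import G1, multiply
from py_ecc.bls.g2_primitives import G1_to_pubkey

secret_keys = [3, 14, 159]
pubkeys = [G1_to_pubkey(multiply(G1, sk)) for sk in secret_keys]
# Aggregation is addition in G1, so the aggregate equals the key of the summed secrets
assert _AggregatePKs(pubkeys) == G1_to_pubkey(multiply(G1, sum(secret_keys)))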
def prove(points, commitment, poly):
    assert is_power_of_two(len(poly))
    # Crop the base points to just what we need
    points = points[:len(poly)]
    # Left-side points for the proof
    L = []
    # Right-side points for the proof
    R = []
    # Fiat-shamir randomness value
    r = hash(serialize_point(commitment))
    # log(n) rounds...
    while len(poly) > 1:
        # Generate the left-side and right-side points
        polyL, polyR = left_half(poly), right_half(poly)
        pointsL, pointsR = left_half(points), right_half(points)
        yL = commit(pointsR, polyL)
        yR = commit(pointsL, polyR)
        L.append(yL)
        R.append(yR)
        # Generate random coefficient for recombining the L and R and commitment
        r = hash(r + serialize_point(yL) + serialize_point(yR))
        a = int.from_bytes(r, 'little') % b.curve_order
        # print('a value: ', a)
        # Generate half-size polynomial and points for the next round
        poly = [(cL + cR * a) % b.curve_order for (cL, cR) in zip(polyL, polyR)]
        points = [
            b.add(b.multiply(pL, a), pR)
            for (pL, pR) in zip(pointsL, pointsR)
        ]
        # print('intermediate commitment:', commit(points, poly))
    return Proof(L, R, poly[0])
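# The prover assumes a handful of helpers that are not shown in these snippets.
# Minimal sketches matching how they are called (the bodies are assumptions):
def is_power_of_two(n):
    return n > 0 and n & (n - 1) == 0

def left_half(xs):
    return xs[:len(xs) // 2]

def right_half(xs):
    return xs[len(xs) // 2:]

def commit(points, poly):
    # Pedersen-style vector commitment: sum over i of points[i] * poly[i]
    return lincomb(points, poly, b.add, b.Z1)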
def verify_multiple(pubkeys: Sequence[BLSPubkey],
                    messages: Sequence[bytes],
                    signature: BLSSignature,
                    domain: int) -> bool:
    len_msgs = len(messages)

    if len(pubkeys) != len_msgs:
        raise ValidationError(
            "len(pubkeys) (%s) should be equal to len(messages) (%s)" % (
                len(pubkeys), len_msgs))

    try:
        o = FQ12([1] + [0] * 11)
        for m_pubs in set(messages):
            # aggregate the pubs
            group_pub = Z1
            for i in range(len_msgs):
                if messages[i] == m_pubs:
                    group_pub = add(group_pub, decompress_G1(pubkeys[i]))

            o *= pairing(hash_to_G2(m_pubs, domain), group_pub, final_exponentiate=False)
        o *= pairing(decompress_G2(signature), neg(G1), final_exponentiate=False)

        final_exponentiation = final_exponentiate(o)
        return final_exponentiation == FQ12.one()
    except (ValidationError, ValueError, AssertionError):
        return False
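# The pairing loop above checks the standard aggregate-verification identity
# (pubkeys in G1, signatures in G2, per this older py_ecc convention):
#     e(sig, -G1) * prod over distinct m of e(H(m), sum of pk_i with m_i == m) == 1
# which is equivalent to e(sig, G1) == prod over i of e(H(m_i), pk_i).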
def generate_proof(data_tree, commitment_tree, proof_tree, indices, setup):
    committee_root = commitment_tree[0][0]
    # Generate a random r value; we use a power of r as a coefficient for each sub-leaf
    # to create a random linear combination
    r = int.from_bytes(
        hash(str([committee_root[0].n] + indices).encode('utf-8')),
        'big') % b.curve_order
    #print("r", r)
    # Total polynomial that we are evaluating
    total_poly_evaluations = [0] * WIDTH
    # The set of all intermediate commitments
    commitments = []
    total_proofs = b.Z1
    for i, index in enumerate(indices):
        c = []
        # Walk from top to bottom of the tree
        for d in range(DEPTH):
            # Power of r for this leaf
            rfactor = pow(r, i * DEPTH + d, MODULUS)
            # Position of the index in this layer of data
            position_of_leaf = index // WIDTH**(DEPTH - d - 1)
            # Position of the index within its data chunk
            sub_index = position_of_leaf % WIDTH
            proof = proof_tree[d][position_of_leaf]
            # Add in rfactor*D / (X - w**i) to the total
            total_proofs = b.add(total_proofs, b.multiply(proof, rfactor))
            # Provide as part of the proof all intermediate-level commitments
            if d > 0:
                c.append(commitment_tree[d][position_of_leaf // WIDTH])
        commitments.append(c)
    # Generate a polynomial commitment for the result
    return commitments, b.normalize(total_proofs)
def _interpolate_in_group(group_shares: Dict[int, tuple], group_gen: tuple) -> tuple:
    # Lagrange coefficients for evaluating the shared polynomial at 0
    lagrange_coeff = _all_lagrange_coeff_at_point(0, group_shares.keys(), bls_curve.curve_order)
    # Start from the identity element (generator * 0)
    combined_group_element = bls_curve.multiply(group_gen, 0)
    for id, gr_el in group_shares.items():
        combined_group_element = bls_curve.add(
            combined_group_element,
            bls_curve.multiply(gr_el, lagrange_coeff[id]))
    return combined_group_element
def _fft(vals, modulus, roots_of_unity):
    if len(vals) <= 4 and type(vals[0]) != tuple:
        #return vals
        return _simple_ft(vals, modulus, roots_of_unity)
    elif len(vals) == 1 and type(vals[0]) == tuple:
        return vals
    L = _fft(vals[::2], modulus, roots_of_unity[::2])
    R = _fft(vals[1::2], modulus, roots_of_unity[::2])
    o = [0 for i in vals]
    for i, (x, y) in enumerate(zip(L, R)):
        y_times_root = b.multiply(y, roots_of_unity[i]) if type(y) == tuple \
            else y * roots_of_unity[i]
        o[i] = b.add(x, y_times_root) if type(x) == tuple \
            else (x + y_times_root) % modulus
        o[i + len(L)] = b.add(x, b.neg(y_times_root)) if type(x) == tuple \
            else (x - y_times_root) % modulus
    return o
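# Tiny scalar-field sanity check for _fft (assumption: MODULUS is a prime with
# 2nd root of unity MODULUS - 1, as for the BLS12-381 scalar field). This
# evaluates the polynomial 3 + 4*X over the two-point domain [1, MODULUS - 1]:
assert _fft([3, 4], MODULUS, [1, MODULUS - 1]) == [7, MODULUS - 1]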
def Aggregate(signatures: Sequence[BLSSignature]) -> BLSSignature:
    if len(signatures) < 1:
        raise ValidationError(
            'Insufficient number of signatures: should be greater than'
            ' or equal to 1, got %d' % len(signatures))
    aggregate = Z2  # Seed with the point at infinity
    for signature in signatures:
        signature_point = signature_to_G2(signature)
        aggregate = add(aggregate, signature_point)
    return G2_to_signature(aggregate)
def check_proof_single(commitment, proof, x, y, setup):
    """
    Check a proof for a Kate commitment for an evaluation f(x) = y
    """
    # Verify the pairing equation
    #
    # e([commitment - y], [1]) = e([proof], [s - x])
    #    equivalent to
    # e([commitment - y]^(-1), [1]) * e([proof], [s - x]) = 1_T
    #
    s_minus_x = b.add(setup[1][1], b.multiply(b.neg(b.G2), x))
    commitment_minus_y = b.add(commitment, b.multiply(b.neg(b.G1), y))

    pairing_check = b.pairing(b.G2, b.neg(commitment_minus_y), False)
    pairing_check *= b.pairing(s_minus_x, proof, False)
    pairing = b.final_exponentiate(pairing_check)
    return pairing == b.FQ12.one()
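# A hypothetical (insecure, test-only) way to produce the `setup` the two Kate
# checkers above expect: powers of a known secret s in G1 and G2. In production
# the setup would come from a trusted-setup ceremony where s is never known.
from py_ecc import optimized_bls12_381 as b

def generate_setup(s, size):
    return (
        [b.multiply(b.G1, pow(s, i, b.curve_order)) for i in range(size + 1)],
        [b.multiply(b.G2, pow(s, i, b.curve_order)) for i in range(size + 1)],
    )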
def verify_evaluation(points, commitment, proof, x, y):
    # Crop the base points to just what we need. We add an additional base point,
    # which we will use to mix in the _evaluation_ of the polynomial.
    points, H = points[:2**len(proof.L)], points[2**len(proof.L)]
    # Powers of x, as in the prover
    xpowers = [pow(x, i, b.curve_order) for i in range(len(points))]
    # Fiat-shamir randomness value
    r = hash(
        serialize_point(commitment) + x.to_bytes(32, 'little') +
        y.to_bytes(32, 'little'))
    # For security, we randomize H
    H = b.multiply(H, int.from_bytes(r, 'little') % b.curve_order)
    # We "mix in" H * the claimed evaluation P(x) = y. Notice that `H * P(x)` equals the
    # dot-product of `H * powers of x` and the polynomial coefficients, so it has the
    # "same format" as the polynomial commitment itself. This allows us to verify the
    # evaluation using the same technique that we use to just prove that the commitment
    # is valid
    commitment = b.add(commitment, b.multiply(H, y))
    # Track the linear combination so we can generate the final-round point and xpower,
    # just as before
    points_coeffs = [1]
    for i in range(len(proof.L)):
        # Generate random coefficient for recombining (same as the prover)
        r = hash(r + serialize_point(proof.L[i]) + serialize_point(proof.R[i]))
        a = int.from_bytes(r, 'little') % b.curve_order
        # print('a value: ', a)
        # Add L and R into the commitment, applying the appropriate coefficients
        commitment = b.add(
            proof.L[i],
            b.add(b.multiply(commitment, a), b.multiply(proof.R[i], a**2)))
        # print('intermediate commitment:', commitment)
        # Update the coefficients (as in basic verification above)
        points_coeffs = sum([[(x * a) % b.curve_order, x] for x in points_coeffs], [])
    # Finally, we do the linear combination; same one for base points and x powers
    combined_point = lincomb(points, points_coeffs, b.add, b.Z1)
    combined_x_powers = sum(p * c for p, c in zip(xpowers, points_coeffs))
    # Base case check: base_point * coefficient ?= commitment. Note that here we
    # have to also mix H * the combined xpower into the final base point
    return b.eq(
        b.add(
            b.multiply(combined_point, proof.tip),
            b.multiply(H, (proof.tip * combined_x_powers) % b.curve_order)),
        commitment)
def get_aggregate_key(keys):
    # Lagrange interpolation "in the exponent": share id i corresponds to the
    # evaluation point i + 1, and we evaluate the implied polynomial at 0
    r = b.Z1
    for i, key in keys.items():
        key_point = pubkey_to_G1(key)
        coef = 1
        for j in keys:
            if j != i:
                coef = -coef * (j + 1) * prime_field_inv(i - j, b.curve_order) % b.curve_order
        r = b.add(r, b.multiply(key_point, coef))
    return G1_to_pubkey(r)
def reconstruct(signatures):
    # Same Lagrange interpolation as get_aggregate_key, but over signature shares in G2
    r = b.Z2
    for i, sig in signatures.items():
        sig_point = signature_to_G2(sig)
        coef = 1
        for j in signatures:
            if j != i:
                coef = -coef * (j + 1) * prime_field_inv(i - j, b.curve_order) % b.curve_order
        r = b.add(r, b.multiply(sig_point, coef))
    return G2_to_signature(r)
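# Toy 2-share sanity check for reconstruct (assumptions: share id i maps to
# evaluation point i + 1, as the (j + 1) factors above imply, and py_ecc's G2
# compression helpers are available):
from py_ecc.optimized_bls12_381 import G2, multiply
from py_ecc.bls.g2_primitives import G2_to_signature

s, c = 1234, 5678  # secret polynomial f(z) = s + c*z, secret is f(0) = s
shares = {i: s + c * (i + 1) for i in range(2)}
sig_shares = {i: G2_to_signature(multiply(G2, sh)) for i, sh in shares.items()}
assert reconstruct(sig_shares) == G2_to_signature(multiply(G2, s))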
def _simple_ft(vals, modulus, roots_of_unity):
    L = len(roots_of_unity)
    o = []
    for i in range(L):
        last = b.Z1 if type(vals[0]) == tuple else 0
        for j in range(L):
            if type(vals[0]) == tuple:
                last = b.add(last, b.multiply(vals[j], roots_of_unity[(i * j) % L]))
            else:
                last += vals[j] * roots_of_unity[(i * j) % L]
        o.append(last if type(last) == tuple else last % modulus)
    return o
def verify_proof(proof, commitment_root, indices, values, setup):
    # Regenerate the same r value as above
    r = int.from_bytes(
        hash(str([commitment_root[0].n] + indices).encode('utf-8')),
        'big') % b.curve_order
    #print("r", r)
    commitments, witness = proof
    # We're making a big pairing check that essentially checks the equation:
    # sum [(P_i - y_i) * r_i * Z(everything except x_i)] = w * Z(everything) = sum [Q_i * r_i * Z(everything)]
    # where Z(set) = product: (X - s) for s in set
    pairing_check = b.FQ12.one()
    for i, (c, index, v) in enumerate(zip(commitments, indices, values)):
        for d in range(DEPTH):
            rfactor = pow(r, i * DEPTH + d, MODULUS)
            position_of_leaf = index // WIDTH**(DEPTH - d - 1)  # Position of this leaf in the data
            sub_index = position_of_leaf % WIDTH
            #print('d', d, 'i', index, 'rfactor', rfactor, 'pos', position_of_leaf)
            # P_i
            comm = c[d - 1] if d else commitment_root
            comm = (comm[0], comm[1], b.FQ.one())
            leaf = hash_point_to_field(c[d]) if d < DEPTH - 1 else v
            #print('comm', comm, 'subindex', sub_index, 'leaf', leaf)
            # (P_i - y_i) * r_i
            comm_minus_leaf_times_r = b.multiply(
                b.add(comm, b.multiply(b.G1, MODULUS - leaf)), rfactor)
            # Z(everything except x_i)
            Z_comm = b.multiply(setup[3][sub_index],
                                field.inv(LAGRANGE_POLYS[sub_index][-1]))
            # Add the product into the pairing
            pairing_check *= b.pairing(Z_comm, comm_minus_leaf_times_r, False)
    # Z(everything)
    global_Z_comm = b.add(setup[1][WIDTH], b.neg(setup[1][0]))
    # Subtract out sum [Q_i * r_i * Z(everything)]
    pairing_check *= b.pairing(b.neg(global_Z_comm),
                               (witness[0], witness[1], b.FQ.one()), False)
    o = b.final_exponentiate(pairing_check)
    assert o == b.FQ12.one(), o
    return o == b.FQ12.one()
def _AggregatePKs(PKs: Sequence[BLSPubkey]) -> BLSPubkey:
    """
    Aggregate the public keys.

    Raise `ValidationError` when there is input validation error.
    """
    try:
        assert len(PKs) >= 1, 'Insufficient number of PKs. (n < 1)'
    except Exception as e:
        raise ValidationError(e)

    aggregate = Z1  # Seed with the point at infinity
    for pk in PKs:
        pubkey_point = pubkey_to_G1(pk)
        aggregate = add(aggregate, pubkey_point)
    return G1_to_pubkey(aggregate)
def hash_to_G2(message: bytes, DST: bytes) -> G2Uncompressed:
    """
    Convert a message to a point on G2 as defined here:
    https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-05#section-3

    Constants and inputs follow the ciphersuite ``BLS12381G2-SHA256-SSWU-RO-`` defined here:
    https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-05#section-8.9.2
    """
    u0 = hash_to_base_FQ2(message, 0, DST)
    u1 = hash_to_base_FQ2(message, 1, DST)
    q0 = map_to_curve_G2(u0)
    q1 = map_to_curve_G2(u1)
    r = add(q0, q1)
    p = clear_cofactor_G2(r)
    return p
def hash_to_G2(message: bytes, DST: bytes, hash_function: HASH) -> G2Uncompressed:
    """
    Convert a message to a point on G2 as defined here:
    https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-07#section-6.6.3

    The idea is to first hash into FQ2 and then use SSWU to map the result into G2.

    Constants and inputs follow the ciphersuite ``BLS12381G2_XMD:SHA-256_SSWU_RO_`` defined here:
    https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-07#section-8.8.2
    """
    u0, u1 = hash_to_field_FQ2(message, 2, DST, hash_function)
    q0 = map_to_curve_G2(u0)
    q1 = map_to_curve_G2(u1)
    r = add(q0, q1)
    p = clear_cofactor_G2(r)
    return p
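# Example invocation of the draft-07 hash-to-curve above (assumption: the DST
# below is the spec's basic-scheme ciphersuite tag; any ASCII domain separation
# tag of at most 255 bytes works):
import hashlib
DST = b'BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_'
point = hash_to_G2(b'hello', DST, hashlib.sha256)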
def derive_public_child(master_pubkey_share: bytes, path: bytes, master_pubkey: bytes = None) -> tuple:
    if not master_pubkey:
        master_pubkey = master_pubkey_share

    root_public_key, root_public_chaincode = parse_master_pubkey(master_pubkey)
    root_pubkey_address = bls_conv.G1_to_pubkey(root_public_key)
    public_chaincode_address = bls_conv.G1_to_pubkey(root_public_chaincode)

    h = _hash_to_bls_field(_derivation_format_hash_input(
        root_pubkey_address, public_chaincode_address, path))

    public_key_share, public_chaincode_share = parse_master_pubkey(master_pubkey_share)
    return bls_curve.add(
        public_key_share,
        bls_curve.multiply(public_chaincode_share, h))
def fk20_multi_data_availability_optimized(polynomial, l, setup):
    """
    FK20 multi-proof method, optimized for data availability where the top half of polynomial
    coefficients == 0
    """
    assert is_power_of_two(len(polynomial))
    n = len(polynomial) // 2
    k = n // l
    assert is_power_of_two(n)
    assert is_power_of_two(l)
    assert k >= 1

    assert all(x == 0 for x in polynomial[n:])
    reduced_polynomial = polynomial[:n]

    # Preprocessing part -- this is independent from the polynomial coefficients and can be
    # done before the polynomial is known, it only needs to be computed once
    xext_fft = []
    for i in range(l):
        x = setup[0][n - l - 1 - i::-l] + [b.Z1]
        xext_fft.append(toeplitz_part1(x))

    add_instrumentation()

    hext_fft = [b.Z1] * 2 * k
    for i in range(l):
        toeplitz_coefficients = reduced_polynomial[-i - 1::l] + [0] * (k + 1) \
            + reduced_polynomial[2 * l - i - 1:-l - i:l]
        # Compute the vector h from the paper using a Toeplitz matrix multiplication
        hext_fft = [
            b.add(v, w) for v, w in zip(
                hext_fft, toeplitz_part2(toeplitz_coefficients, xext_fft[i]))
        ]

    # Final FFT done after summing all h vectors
    h = toeplitz_part3(hext_fft)

    h = h + [b.Z1] * k

    # The proofs are the DFT of the h vector
    return fft(h, MODULUS, get_root_of_unity(2 * k))
def Aggregate(cls, signatures: Sequence[BLSSignature]) -> BLSSignature:
    """
    The Aggregate algorithm aggregates multiple signatures into one.

    Raise `ValidationError` when there is input validation error.
    """
    try:
        # Inputs validation
        for signature in signatures:
            assert cls._is_valid_signature(signature)

        # Preconditions
        assert len(signatures) >= 1
    except Exception as e:
        raise ValidationError(e)

    # Procedure
    aggregate = Z2  # Seed with the point at infinity
    for signature in signatures:
        signature_point = signature_to_G2(signature)
        aggregate = add(aggregate, signature_point)
    return G2_to_signature(aggregate)
def fk20_multi(polynomial, l, setup):
    """
    For a polynomial of size n, let w be a n-th root of unity. Then this method will return
    k=n/l KZG proofs for the points
        proof[0]: w^(0*l + 0), w^(0*l + 1), ... w^(0*l + l - 1)
        proof[1]: w^(1*l + 0), w^(1*l + 1), ... w^(1*l + l - 1)
        ...
        proof[i]: w^(i*l + 0), w^(i*l + 1), ... w^(i*l + l - 1)
        ...
    """
    n = len(polynomial)
    k = n // l
    assert is_power_of_two(n)
    assert is_power_of_two(l)
    assert k >= 1

    # Preprocessing part -- this is independent from the polynomial coefficients and can be
    # done before the polynomial is known, it only needs to be computed once
    xext_fft = []
    for i in range(l):
        x = setup[0][n - l - 1 - i::-l] + [b.Z1]
        xext_fft.append(toeplitz_part1(x))

    hext_fft = [b.Z1] * 2 * k
    for i in range(l):
        toeplitz_coefficients = polynomial[-i - 1::l] + [0] * (k + 1) \
            + polynomial[2 * l - i - 1:-l - i:l]
        # Compute the vector h from the paper using a Toeplitz matrix multiplication
        hext_fft = [
            b.add(v, w) for v, w in zip(
                hext_fft, toeplitz_part2(toeplitz_coefficients, xext_fft[i]))
        ]

    h = toeplitz_part3(hext_fft)

    # The proofs are the DFT of the h vector
    return fft(h, MODULUS, get_root_of_unity(k))
def aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey:
    o = Z1
    for p in pubkeys:
        o = add(o, decompress_G1(p))
    return BLSPubkey(compress_G1(o))
def aggregate_signatures(signatures: Sequence[BLSSignature]) -> BLSSignature:
    o = Z2
    for s in signatures:
        o = FQP_point_to_FQ2_point(add(o, decompress_G2(s)))
    return BLSSignature(compress_G2(o))
def aggregate_pubkeys(pubkeys: Sequence[int]) -> int:
    o = Z1
    for p in pubkeys:
        o = add(o, decompress_G1(p))
    return compress_G1(o)
def aggregate_signatures(signatures: Sequence[bytes]) -> Tuple[int, int]:
    o = Z2
    for s in signatures:
        o = FQP_point_to_FQ2_point(add(o, decompress_G2(s)))
    return compress_G2(o)
# Hash the two vote messages to scalars
m1, m2 = [int(sha256(i).hexdigest(), 16) for i in [b"D", b"R"]]
idx, ny = [(i, j) for i, j in enumerate(result) if j["Name"] == 'New York'][0]
# Re-sign New York's vote: scale the "D" signature by m1^-1 * m2 to obtain a
# valid signature on "R"
ny["Sign"] = G2_to_signature(
    multiply(multiply(signature_to_G2(unhex(ny["Sign"])), invert(m1, order)), m2)).hex()
ny["Vote"] = "R"
assert bls.Verify(unhex(ny["PK"]), ny["Vote"].encode(), unhex(ny["Sign"]))
result[idx] = ny
xored_flag = unhex(connect(result).decode())
# Forge a second result set: add 2x the neighbor's signature onto New York's and
# negate the neighbor's own signature, leaving the aggregate unchanged
fake_result = result[:]
w = fake_result[idx + 1]
ny = fake_result[idx]
ny["Sign"] = G2_to_signature(
    add(signature_to_G2(unhex(ny["Sign"])),
        multiply(signature_to_G2(unhex(w["Sign"])), 2))).hex()
w["Sign"] = G2_to_signature(neg(signature_to_G2(unhex(w["Sign"])))).hex()
assert bls.Verify(bls._AggregatePKs([unhex(ny["PK"]), unhex(w["PK"])]), b'R',
                  bls.Aggregate([unhex(ny["Sign"]), unhex(w["Sign"])]))
fake_result[idx] = ny
fake_result[idx + 1] = w
fake_flag = connect(fake_result)
flag = bytexor(fake_flag, xored_flag)
print(flag)
assert flag == b'inctf{BLS_574nd5_f0r_B0n3h_Lynn_Sh4ch4m}'
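# Why the re-signing step works (an inference about the challenge, implied by
# the invert(m1, order) * m2 scaling): the scheme evidently hashes a message m
# to the scalar int(sha256(m)) rather than to an independent curve point, so
#     sig_D = sk * m1 * G2   and   sig_D * m1^-1 * m2 = sk * m2 * G2 = sig_R
# i.e. a signature on "D" converts into a valid signature on "R" without sk.
# The second forgery leaves the aggregate unchanged:
#     (sig_ny + 2*sig_w) + (-sig_w) = sig_ny + sig_w
# so the aggregated verification in the assert still passes.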