Example no. 1
    def aggregate_sigs_secure(signatures, public_keys, message_hashes):
        """
        Aggregate signatures using the secure method, which calculates
        exponents based on public keys, and raises each signature to an
        exponent before multiplying them together. This is secure against
        rogue public key attacks, but is slower than simple aggregation.
        """
        if (len(signatures) != len(public_keys)
                or len(public_keys) != len(message_hashes)):
            raise Exception("Invalid number of keys")
        mh_pub_sigs = [(message_hashes[i], public_keys[i], signatures[i])
                       for i in range(len(signatures))]

        # Sort by message hash + pk
        mh_pub_sigs.sort()

        computed_Ts = hash_pks(len(public_keys), public_keys)

        # Raise each sig to a power of each t,
        # and multiply all together into agg_sig
        ec = public_keys[0].ec
        agg_sig = JacobianPoint(Fq2.one(ec.q), Fq2.one(ec.q), Fq2.zero(ec.q),
                                True, ec)

        for i, (_, _, signature) in enumerate(mh_pub_sigs):
            agg_sig += signature * computed_Ts[i]

        return Signature.from_g2(agg_sig)
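
The `hash_pks` call above is where the per-signature exponents come from: the whole set of public keys is hashed into a commitment, and one exponent per index is derived from it and reduced modulo the group order. A minimal self-contained sketch of that idea (the serialization and hash construction below are assumptions for illustration, not the library's exact scheme):

    import hashlib

    def sketch_hash_pks(num_outputs, serialized_pks, order):
        # Commit to the entire set of public keys first, so every exponent
        # depends on every key.
        pk_commitment = hashlib.sha256(b"".join(serialized_pks)).digest()

        exponents = []
        for i in range(num_outputs):
            # One exponent per index, reduced mod the group order.
            h = hashlib.sha256(i.to_bytes(4, "big") + pk_commitment).digest()
            exponents.append(int.from_bytes(h, "big") % order)
        return exponents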
Example no. 2
    def aggregate_priv_keys(private_keys, public_keys, secure):
        """
        Aggregates private keys together
        """
        if not secure:
            sum_keys = sum(pk.value for pk in private_keys) % default_ec.n

        else:
            if not public_keys:
                raise Exception(
                    "Must include public keys in secure aggregation")
            if len(private_keys) != len(public_keys):
                raise Exception("Invalid number of keys")

            # zip() returns an iterator in Python 3; build a list so it can
            # be sorted by public key
            priv_pub_keys = sorted(zip(public_keys, private_keys))
            computed_Ts = hash_pks(len(private_keys), public_keys)
            n = public_keys[0].value.ec.n

            sum_keys = 0
            for i, (_, privkey) in enumerate(priv_pub_keys):
                sum_keys += privkey.value * computed_Ts[i]
                sum_keys %= n

        return PrivateKey.from_bytes(sum_keys.to_bytes(32, "big"))
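
A quick way to see why the secure path works is a toy discrete-log group (not BLS12-381; every number below is illustrative): the weighted sum of private keys modulo the group order is exactly the secret behind the product of `pk_i ** t_i` on the public side.

    # Toy subgroup of Z_23*: g = 2 has order 11. Illustrative values only.
    p, g, n = 23, 2, 11
    priv_keys = [3, 7, 5]
    exponents = [4, 9, 2]                      # stand-ins for computed_Ts
    pub_keys = [pow(g, sk, p) for sk in priv_keys]

    # Secure private-key aggregation: weighted sum mod the group order.
    agg_priv = sum(t * sk for t, sk in zip(exponents, priv_keys)) % n

    # Secure public-key aggregation: combine pk_i raised to t_i.
    agg_pub = 1
    for pk, t in zip(pub_keys, exponents):
        agg_pub = (agg_pub * pow(pk, t, p)) % p

    # The aggregated private key matches the aggregated public key.
    assert pow(g, agg_priv, p) == agg_pub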
Example no. 3
    def aggregate_pub_keys(public_keys, secure):
        """
        Aggregates public keys together
        """
        if len(public_keys) < 1:
            raise Exception("Invalid number of keys")
        public_keys.sort()

        computed_Ts = hash_pks(len(public_keys), public_keys)

        ec = public_keys[0].value.ec
        sum_keys = JacobianPoint(Fq.one(ec.q), Fq.one(ec.q), Fq.zero(ec.q),
                                 True, ec)
        for i in range(len(public_keys)):
            addend = public_keys[i].value
            if secure:
                addend *= computed_Ts[i]
            sum_keys += addend

        return PublicKey.from_g1(sum_keys)
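
The `secure` flag exists because plain aggregation of public keys is open to the rogue public key attack mentioned in Example no. 1. A toy sketch of the attack in the same illustrative discrete-log group: the attacker publishes a key crafted so that the naive aggregate collapses to a key whose secret only the attacker knows.

    p, g = 23, 2                               # toy group, not BLS12-381
    victim_pk = pow(g, 6, p)                   # victim's key; the attacker never learns the exponent

    a = 10                                     # attacker's chosen secret
    # Rogue key: g**a combined with the inverse of the victim's public key
    # (pow with a negative exponent needs Python 3.8+).
    rogue_pk = (pow(g, a, p) * pow(victim_pk, -1, p)) % p

    # Naive aggregation just combines the two keys with the group operation.
    naive_agg = (victim_pk * rogue_pk) % p

    # The aggregate equals g**a, so the attacker alone controls its secret
    # and could produce "joint" signatures without the victim.
    assert naive_agg == pow(g, a, p)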
Example no. 4
    def secure_merge_infos(colliding_infos):
        """
        Infos are merged together with combination of exponents
        """

        # Groups are sorted by message then pk then exponent
        # Each info object (and all of its exponents) will be
        # exponentiated by one of the Ts
        colliding_infos.sort()

        sorted_keys = []
        for info in colliding_infos:
            for key in info.tree:
                sorted_keys.append(key)
        sorted_keys.sort()
        sorted_pks = [public_key for (message_hash, public_key) in sorted_keys]
        computed_Ts = hash_pks(len(colliding_infos), sorted_pks)

        # Group order, exponents can be reduced mod the order
        order = sorted_pks[0].value.ec.n

        new_tree = {}
        for i in range(len(colliding_infos)):
            for key, value in colliding_infos[i].tree.items():
                if key not in new_tree:
                    # This message & pk have not been included yet
                    new_tree[key] = (value * computed_Ts[i]) % order
                else:
                    # This message and pk are already included, so multiply
                    addend = value * computed_Ts[i]
                    new_tree[key] = (new_tree[key] + addend) % order
        mh_pubkeys = [k for k, v in new_tree.items()]
        mh_pubkeys.sort()
        message_hashes = [
            message_hash for (message_hash, public_key) in mh_pubkeys
        ]
        public_keys = [public_key for (message_hash, public_key) in mh_pubkeys]
        return AggregationInfo(new_tree, message_hashes, public_keys)
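
The merge itself can be followed on plain integers: each tree maps a `(message_hash, public_key)` pair to an exponent, every colliding info is scaled by its own `t`, and pairs that appear in more than one tree accumulate additively modulo the order. A small self-contained sketch (tree contents are made up for illustration):

    order = 11                                  # toy group order
    computed_Ts = [4, 9]                        # one t per colliding info
    trees = [                                   # one tree per AggregationInfo
        {("m1", "pkA"): 1, ("m2", "pkB"): 1},
        {("m1", "pkA"): 1, ("m3", "pkC"): 1},   # ("m1", "pkA") collides
    ]

    new_tree = {}
    for t, tree in zip(computed_Ts, trees):
        for key, exponent in tree.items():
            # Scale every exponent in this info by its t; colliding keys
            # accumulate additively mod the group order.
            new_tree[key] = (new_tree.get(key, 0) + exponent * t) % order

    assert new_tree[("m1", "pkA")] == (1 * 4 + 1 * 9) % order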
Example no. 5
    def aggregate_priv_keys(private_keys, public_keys, secure):
        """
        Aggregates private keys together
        """
        if secure and len(private_keys) != len(public_keys):
            raise Exception("Invalid number of keys")

        priv_pub_keys = [(public_keys[i], private_keys[i])
                         for i in range(len(private_keys))]
        # Sort by public keys
        priv_pub_keys.sort()

        computed_Ts = hash_pks(len(private_keys), public_keys)

        n = public_keys[0].value.ec.n
        sum_keys = 0
        for i in range(len(priv_pub_keys)):
            addend = priv_pub_keys[i][1].value
            if secure:
                addend *= computed_Ts[i]
            sum_keys = (sum_keys + addend) % n

        return PrivateKey.from_bytes(sum_keys.to_bytes(32, "big"))
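
For the non-secure path the same toy-group picture applies without exponents: the plain sum of private keys modulo the order corresponds to combining the public keys directly (again illustrative parameters, not BLS12-381):

    p, g, n = 23, 2, 11
    priv_keys = [3, 7, 5]
    pub_keys = [pow(g, sk, p) for sk in priv_keys]

    # Simple private-key aggregation: plain sum mod the group order.
    agg_priv = sum(priv_keys) % n

    # Simple public-key aggregation: combine keys with the group operation.
    agg_pub = 1
    for pk in pub_keys:
        agg_pub = (agg_pub * pk) % p

    assert pow(g, agg_priv, p) == agg_pub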
Example no. 6
    def aggregate_sigs(signatures):
        """
        Aggregates many (aggregate) signatures, using a combination of simple
        and secure aggregation. Signatures are grouped based on which ones
        share common messages, and these are all merged securely.
        """
        public_keys = []  # List of lists
        message_hashes = []  # List of lists

        for signature in signatures:
            if signature.aggregation_info.empty():
                raise Exception(
                    "Each signature must have a valid aggregation info")
            public_keys.append(signature.aggregation_info.public_keys)
            message_hashes.append(signature.aggregation_info.message_hashes)

        # Find colliding vectors, save colliding messages
        messages_set = set()
        colliding_messages_set = set()

        for msg_vector in message_hashes:
            messages_set_local = set()
            for msg in msg_vector:
                if msg in messages_set and msg not in messages_set_local:
                    colliding_messages_set.add(msg)
                messages_set.add(msg)
                messages_set_local.add(msg)

        if len(colliding_messages_set) == 0:
            # There are no colliding messages between the groups, so we
            # will just aggregate them all simply. Note that we assume
            # that every group is a valid aggregate signature. If an invalid
            # or insecure signature is given, an invalid signature will
            # be created. We don't verify for performance reasons.
            final_sig = BLS.aggregate_sigs_simple(signatures)
            aggregation_infos = [sig.aggregation_info for sig in signatures]
            final_agg_info = AggregationInfo.merge_infos(aggregation_infos)
            final_sig.set_aggregation_info(final_agg_info)
            return final_sig

        # There are groups that share messages, therefore we need
        # to use a secure form of aggregation. First we find which
        # groups collide, and securely aggregate these. Then, we
        # use simple aggregation at the end.
        colliding_sigs = []
        non_colliding_sigs = []
        colliding_message_hashes = []  # List of lists
        colliding_public_keys = []  # List of lists

        for i in range(len(signatures)):
            group_collides = False
            for msg in message_hashes[i]:
                if msg in colliding_messages_set:
                    group_collides = True
                    colliding_sigs.append(signatures[i])
                    colliding_message_hashes.append(message_hashes[i])
                    colliding_public_keys.append(public_keys[i])
                    break
            if not group_collides:
                non_colliding_sigs.append(signatures[i])

        # Arrange all signatures, sorted by their aggregation info
        colliding_sigs.sort(key=lambda s: s.aggregation_info)

        # Arrange all public keys in sorted order, by (m, pk)
        sort_keys_sorted = []
        for i in range(len(colliding_public_keys)):
            for j in range(len(colliding_public_keys[i])):
                sort_keys_sorted.append((colliding_message_hashes[i][j],
                                         colliding_public_keys[i][j]))
        sort_keys_sorted.sort()
        sorted_public_keys = [pk for (mh, pk) in sort_keys_sorted]

        computed_Ts = hash_pks(len(colliding_sigs), sorted_public_keys)

        # Raise each sig to a power of each t,
        # and multiply all together into agg_sig
        ec = sorted_public_keys[0].value.ec
        agg_sig = JacobianPoint(Fq2.one(ec.q), Fq2.one(ec.q), Fq2.zero(ec.q),
                                True, ec)

        for i, signature in enumerate(colliding_sigs):
            agg_sig += signature.value * computed_Ts[i]

        for signature in non_colliding_sigs:
            agg_sig += signature.value

        final_sig = Signature.from_g2(agg_sig)
        aggregation_infos = [sig.aggregation_info for sig in signatures]
        final_agg_info = AggregationInfo.merge_infos(aggregation_infos)
        final_sig.set_aggregation_info(final_agg_info)

        return final_sig
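
The grouping step (deciding which aggregate signatures have to go through the secure path) depends only on the message-hash vectors, so it can be sketched in isolation. A small self-contained version of that logic, returning the indices of colliding and non-colliding groups (all names below are illustrative):

    def split_by_collisions(message_hash_vectors):
        # First pass: a message collides if it appears in more than one vector.
        seen, colliding = set(), set()
        for vector in message_hash_vectors:
            local = set()
            for msg in vector:
                if msg in seen and msg not in local:
                    colliding.add(msg)
                seen.add(msg)
                local.add(msg)

        # Second pass: a group collides if any of its messages collide.
        colliding_idx, simple_idx = [], []
        for i, vector in enumerate(message_hash_vectors):
            if any(msg in colliding for msg in vector):
                colliding_idx.append(i)
            else:
                simple_idx.append(i)
        return colliding_idx, simple_idx

    # Groups 0 and 2 share "m1" and need secure aggregation; group 1 does not.
    assert split_by_collisions([["m1", "m2"], ["m3"], ["m1"]]) == ([0, 2], [1])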