Code example #1
 async def __aenter__(self):
     await self.node_communicator.__aenter__()
     self.subscribe_task, self.subscribe = subscribe_recv(
         self.node_communicator.recv
     )
     self.send = self.node_communicator.send
     return self
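
All of these examples rely on the same `subscribe_recv` contract: it takes a single `recv` channel and returns a background dispatch task plus a `subscribe(tag)` function that yields a per-tag receive channel. Below is a minimal sketch of that contract, assuming wire messages are `(sender, (tag, payload))` tuples; it illustrates the idea and is not the project's actual implementation.

import asyncio
from collections import defaultdict


def subscribe_recv_sketch(recv):
    # One queue per tag, created lazily on first use.
    queues = defaultdict(asyncio.Queue)

    async def _dispatch():
        # Route every incoming message to the queue for its tag.
        while True:
            sender, (tag, payload) = await recv()
            queues[tag].put_nowait((sender, payload))

    task = asyncio.ensure_future(_dispatch())

    def subscribe(tag):
        # Awaiting the returned callable yields (sender, payload)
        # pairs carrying this tag.
        return queues[tag].get

    return task, subscribe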
Code example #2
    def __init__(self,
                 public_keys,
                 private_key,
                 crs,
                 n,
                 t,
                 my_id,
                 send,
                 recv,
                 pc=None,
                 field=ZR):  # noqa: E501
        self.public_keys, self.private_key = public_keys, private_key
        self.n, self.t, self.my_id = n, t, my_id
        self.g = crs[0]

        # Create a mechanism to split the `recv` channels based on `tag`
        self.subscribe_recv_task, self.subscribe_recv = subscribe_recv(recv)

        # Create a mechanism to split the `send` channels based on `tag`
        def _send(tag):
            return wrap_send(tag, send)

        self.get_send = _send

        # This queue lets callers consume each share the moment it is generated,
        # which is especially helpful when running multiple AVSSes in parallel.
        self.output_queue = asyncio.Queue()

        self.field = field
        self.poly = polynomials_over(self.field)
        if pc is None:
            self.poly_commit = PolyCommitLin(crs, field=self.field)
            self.poly_commit.preprocess(5)
        else:
            self.poly_commit = pc
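
`wrap_send` is the sending-side counterpart: it closes over a tag so that every outgoing message is labeled for the receiver's subscribe mechanism. A minimal sketch, assuming the same `(tag, payload)` wire format as above:

def wrap_send_sketch(tag, send):
    def tagged_send(dest, message):
        # Label the payload so subscribe_recv can route it to the
        # matching per-tag channel on the receiving side.
        send(dest, (tag, message))

    return tagged_send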
Code example #3
    def __init__(
        self,
        n,
        t,
        my_id,
        send,
        recv,
        tag,
        batch_size=10,
        avss_value_processor_chunk_size=1,
    ):
        self.n, self.t, self.my_id = n, t, my_id
        self.tag = tag
        self.avss_value_processor_chunk_size = avss_value_processor_chunk_size

        # Batch size of values to AVSS from a node
        self.batch_size = batch_size
        # Minimum number of values before triggering another set of AVSSes
        self.low_watermark = self.batch_size

        self.output_queue = asyncio.Queue()

        # Create a mechanism to split the `send` and `recv` channels based on `tag`
        subscribe_recv_task, subscribe = subscribe_recv(recv)
        self.tasks = [subscribe_recv_task]

        def _get_send_recv(tag):
            return wrap_send(tag, send), subscribe(tag)

        self.get_send_recv = _get_send_recv
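
The `low_watermark` field suggests a refill policy: whenever fewer than `batch_size` values remain available, another round of AVSSes should be triggered. The loop below is a hypothetical sketch of that policy; `trigger_avss_batch` is an invented placeholder for whatever starts a new batch.

async def refill_loop(output_queue, low_watermark, trigger_avss_batch):
    # Hypothetical policy sketch: top up the pool whenever the number
    # of ready values drops below the low watermark, then block on the
    # next value for the consumer.
    while True:
        if output_queue.qsize() < low_watermark:
            await trigger_avss_batch()
        value = await output_queue.get()
        # ... hand `value` to the consumer ...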
Code example #4
File: asynchromix.py Project: tyurek/hbACSS
    def __init__(self, sid, myid, send, recv, w3, contract):
        self.sid = sid
        self.myid = myid
        self.contract = contract
        self.w3 = w3
        self._task1a = asyncio.ensure_future(self._offline_inputmasks_loop())
        self._task1a.add_done_callback(print_exception_callback)
        self._task1b = asyncio.ensure_future(self._offline_mixes_loop())
        self._task1b.add_done_callback(print_exception_callback)
        self._task2 = asyncio.ensure_future(self._client_request_loop())
        self._task2.add_done_callback(print_exception_callback)
        self._task3 = asyncio.ensure_future(self._mixing_loop())
        self._task3.add_done_callback(print_exception_callback)
        self._task4 = asyncio.ensure_future(self._mixing_initiate_loop())
        self._task4.add_done_callback(print_exception_callback)

        self._subscribe_task, subscribe = subscribe_recv(recv)

        def _get_send_recv(tag):
            return wrap_send(tag, send), subscribe(tag)

        self.get_send_recv = _get_send_recv

        self._inputmasks = []
        self._triples = []
        self._bits = []
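
`print_exception_callback` is attached to every background task above because asyncio otherwise swallows exceptions raised in tasks that nobody awaits. A minimal sketch of such a callback, assuming its only job is to surface the error:

import traceback


def print_exception_callback_sketch(task):
    # Done-callbacks receive the finished task; asking for its exception
    # lets us print errors that would otherwise be silently dropped.
    if task.cancelled():
        return
    exc = task.exception()
    if exc is not None:
        traceback.print_exception(type(exc), exc, exc.__traceback__)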
Code example #5
File: avss_value_processor.py Project: tyurek/hbACSS
    def __init__(self,
                 pk,
                 sk,
                 n,
                 t,
                 my_id,
                 send,
                 recv,
                 get_input,
                 chunk_size=1):
        # This stores the AVSSed values which have been received from each dealer.
        self.inputs_per_dealer = [list() for _ in range(n)]

        # This stores the AVSSed values for each dealer which have been agreed
        # upon. This is a list of Futures which are marked done when the value
        # is received. The Future guarantees that the AVSS value has been agreed
        # upon by at least `t+1` parties and will resolve to a share once it is
        # received by this node.
        self.outputs_per_dealer = [list() for _ in range(n)]

        # This stores a list of the indices of the next AVSS value to be returned
        # when a consumer requests a value dealt from a particular dealer.
        self.next_idx_to_return_per_dealer = [0] * n

        # The input to ACS is the count of values received at this node dealt by all
        # the nodes. In order to get the share of the same element at each node, the
        # ordering of messages per dealer needs to be ensured. This is done by using
        # one Sequencer instance per dealer. The Sequencer buffers any values
        # received out of order and delivers them in order of the AVSS id.
        self.sequencers = defaultdict(Sequencer)

        # This queue contains values AVSSed from all nodes such that at least `n-t`
        # nodes have received a value corresponding to a particular batch.
        # Eg: Let the following be the set of values received by all nodes:
        #
        #   |  0  |  1  |  2  |  3  |
        #   -------------------------
        #   |  x0 |  x1 |     |     |  => Each row is a batch.
        #   |     |     |     |     |
        #
        # n=4, t=1. Since n-t nodes have not yet received a value, we do not add
        # any of the available values to the output. Once we get a value dealt by
        # node 2 or 3, we can output all the available values to the queue.
        self.output_queue = asyncio.Queue()

        # This is for retrieving consecutive values from the same dealer in a situation
        # when they are coupled to each other. This is true for triples and powers.
        self.chunk_size = chunk_size

        subscribe_recv_task, subscribe = subscribe_recv(recv)
        self.tasks = [subscribe_recv_task]

        def _get_send_recv(tag):
            return wrap_send(tag, send), subscribe(tag)

        self.get_send_recv = _get_send_recv

        self.pk, self.sk = pk, sk
        self.n, self.t, self.my_id = n, t, my_id
        self.get_input = get_input
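
The per-dealer `Sequencer` buffers values that arrive out of order and releases them strictly in order of AVSS id. A minimal sketch of that behavior, assuming ids are consecutive integers starting at 0 (the real class may expose a different interface):

class SequencerSketch:
    """Buffer (index, value) pairs and release them in index order."""

    def __init__(self):
        self._buffer = {}
        self._next_index = 0

    def add(self, index, value):
        self._buffer[index] = value

    def is_next_available(self):
        return self._next_index in self._buffer

    def get(self):
        # Only call this when is_next_available() returns True.
        value = self._buffer.pop(self._next_index)
        self._next_index += 1
        return value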
Code example #6
    def __init__(self,
                 public_keys,
                 private_key,
                 crs,
                 n,
                 t,
                 my_id,
                 send,
                 recv,
                 pc=None,
                 field=ZR):  # noqa: E501
        self.public_keys, self.private_key = public_keys, private_key
        self.n, self.t, self.my_id = n, t, my_id
        assert len(crs) == 3
        assert len(crs[0]) == t + 1
        self.g = crs[0][0]

        # Create a mechanism to split the `recv` channels based on `tag`
        self.subscribe_recv_task, self.subscribe_recv = subscribe_recv(recv)

        # Create a mechanism to split the `send` channels based on `tag`
        def _send(tag):
            return wrap_send(tag, send)

        self.get_send = _send

        self.field = field
        self.poly = polynomials_over(self.field)
        if pc is not None:
            self.poly_commit = pc
        else:
            self.poly_commit = PolyCommitConst(crs, field=self.field)
            self.poly_commit.preprocess_prover()
            self.poly_commit.preprocess_verifier()

        self.avid_msg_queue = asyncio.Queue()
        self.tasks = []
        self.shares_future = asyncio.Future()
        self.output_queue = asyncio.Queue()
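
Both AVSS variants expose their results through `output_queue`, so a caller can drain the queue instead of awaiting any particular AVSS instance. A usage sketch; the exact layout of each queue entry (here assumed to be a tuple containing the dealer id and the share) is an assumption:

async def consume_shares(avss, count):
    # Drain `count` entries from the AVSS output queue as they appear.
    shares = []
    for _ in range(count):
        item = await avss.output_queue.get()  # e.g. (dealer_id, share)
        shares.append(item)
    return shares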
Code example #7
async def generate_bits(n, t, k, my_id, _send, _recv, field):
    subscribe_recv_task, subscribe = subscribe_recv(_recv)

    def _get_send_recv(tag):
        return wrap_send(tag, _send), subscribe(tag)

    # Start listening for my share of t and 2t shares from all parties.
    send, recv = _get_send_recv("randousha")
    rs_t2t = await randousha(n, t, 2 * k, my_id, send, recv, field)

    # To generate bits, we generate a batch of `t,2t` sharings of
    # [u]_t, [u]_2t, [r]_t, [r]_2t. The goal is to reconstruct `u^2`
    # so we can return `[u]/sqrt(u^2)`. The [r] sharings are used
    # for publicly reconstructing:
    #    u^2 = open([u]_t * [u]_t + [r]_2t) - [r]_t
    us_t2t = rs_t2t[0:k]
    rs_t2t = rs_t2t[k:2 * k]

    us_t, _ = zip(*us_t2t)
    us_t = list(map(field, us_t))
    rs_t, rs_2t = zip(*rs_t2t)

    # Compute degree reduction to get the bit
    async def prog(ctx):
        u2rs_2t = [u * u + r for u, r in zip(us_t, rs_2t)]
        assert len(u2rs_2t) == len(rs_t)
        u2rs = await ctx.ShareArray(u2rs_2t, 2 * t).open()
        u2s_t = [u2r - r for u2r, r in zip(u2rs, rs_t)]
        u2s = await ctx.ShareArray(u2s_t).open()
        bits = [u / u2.sqrt() for u, u2 in zip(us_t, u2s)]
        return bits

    # TODO: compute triples through degree reduction
    send, recv = _get_send_recv("opening")
    ctx = Mpc("mpc:opening", n, t, my_id, send, recv, prog, {})
    result = await ctx._run()
    # print(f'[{my_id}] Generate triples complete')
    subscribe_recv_task.cancel()
    return result
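
The comment in the middle of this example relies on a standard masking identity: opening `u*u + r` under the degree-2t sharing reveals only the masked square, and subtracting `r` recovers `u^2`. The toy check below verifies the arithmetic with plain integers modulo a small prime; no actual secret sharing is involved.

# Toy check of the masking identity over GF(p), all values public.
p = 97
u = 23   # the secret whose square we want
r = 55   # the random mask shared at both degree t and 2t

masked = (u * u + r) % p        # what the degree-2t opening reveals
u_squared = (masked - r) % p    # subtracting the mask recovers u^2
assert u_squared == (u * u) % p

# generate_bits then returns u / sqrt(u^2), which is either +1 or -1,
# i.e. a uniformly random bit in the {-1, +1} representation.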
Code example #8
async def generate_triples(n, t, k, my_id, _send, _recv, field):
    subscribe_recv_task, subscribe = subscribe_recv(_recv)

    def _get_send_recv(tag):
        return wrap_send(tag, _send), subscribe(tag)

    # Start listening for my share of t and 2t shares from all parties.
    send, recv = _get_send_recv("randousha")
    rs_t2t = await randousha(n, t, 3 * k, my_id, send, recv, field)

    as_t2t = rs_t2t[0 * k:1 * k]
    bs_t2t = rs_t2t[1 * k:2 * k]
    rs_t2t = rs_t2t[2 * k:3 * k]

    as_t, _ = zip(*as_t2t)
    bs_t, _ = zip(*bs_t2t)
    as_t = list(map(field, as_t))
    bs_t = list(map(field, bs_t))
    rs_t, rs_2t = zip(*rs_t2t)

    # Compute degree reduction to get triples
    # TODO: Use the mixins and preprocessing system
    async def prog(ctx):
        assert len(rs_2t) == len(rs_t) == len(as_t) == len(bs_t)

        abrs_2t = [a * b + r for a, b, r in zip(as_t, bs_t, rs_2t)]
        abrs = await ctx.ShareArray(abrs_2t, 2 * t).open()
        abs_t = [abr - r for abr, r in zip(abrs, rs_t)]
        return list(zip(as_t, bs_t, abs_t))

    # TODO: compute triples through degree reduction
    send, recv = _get_send_recv("opening")
    ctx = Mpc("mpc:opening", n, t, my_id, send, recv, prog, {})

    result = await ctx._run()
    subscribe_recv_task.cancel()

    return result
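
To exercise `generate_triples` (or `generate_bits`) locally, each of the n parties needs `send`/`recv` endpoints wired to the others. The in-process router below is a hypothetical sketch, assuming `send(dest, msg)` delivers `(sender, msg)` tuples to party `dest`:

import asyncio


def make_router(n):
    # One inbox per party; send(dest, msg) tags the message with the sender.
    queues = [asyncio.Queue() for _ in range(n)]

    def make_send(sender):
        def send(dest, msg):
            queues[dest].put_nowait((sender, msg))
        return send

    sends = [make_send(i) for i in range(n)]
    recvs = [queues[i].get for i in range(n)]
    return sends, recvs

Party `i` would then run `generate_triples(n, t, k, i, sends[i], recvs[i], field)`, with all parties gathered under `asyncio.gather`.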
Code example #9
async def randousha(n, t, k, my_id, _send, _recv, field):
    """
    Generates a batch of (n-2t)k secret sharings of random elements
    """
    poly = polynomials_over(field)
    eval_point = EvalPoint(field, n, use_omega_powers=False)
    big_t = n - (2 * t) - 1  # This is the same as `T` in the HyperMPC paper.
    encoder = EncoderFactory.get(eval_point)

    # Pick k random elements
    def to_int(coeffs):
        return tuple(map(int, coeffs))

    my_randoms = [field.random() for _ in range(k)]

    # Generate t and 2t shares of the random element.
    coeffs_t = [to_int(poly.random(t, r).coeffs) for r in my_randoms]
    coeffs_2t = [to_int(poly.random(2 * t, r).coeffs) for r in my_randoms]
    unref_t = encoder.encode(coeffs_t)
    unref_2t = encoder.encode(coeffs_2t)

    subscribe_recv_task, subscribe = subscribe_recv(_recv)

    def _get_send_recv(tag):
        return wrap_send(tag, _send), subscribe(tag)

    # Start listening for my share of t and 2t shares from all parties.
    send, recv = _get_send_recv("H1")
    share_recv_task = asyncio.create_task(_recv_loop(n, recv))

    # Send each party their shares.
    to_send_t = transpose_lists(unref_t)
    to_send_2t = transpose_lists(unref_2t)
    for i in range(n):
        send(i, (to_send_t[i], to_send_2t[i]))

    # Wait until all shares are received.
    received_shares = await share_recv_task
    unrefined_t_shares, unrefined_2t_shares = zip(*received_shares)

    # Apply the hyper-invertible matrix.
    # Assume the unrefined shares to be coefficients of a polynomial
    # and then evaluate that polynomial at powers of omega.
    ref_t = encoder.encode(transpose_lists(list(unrefined_t_shares)))
    ref_2t = encoder.encode(transpose_lists(list(unrefined_2t_shares)))

    # Parties with id in [N-2t+1, N] need to start
    # listening for shares which they have to check.
    send, recv = _get_send_recv("H2")
    to_send_t = transpose_lists(ref_t)
    to_send_2t = transpose_lists(ref_2t)

    if my_id > big_t:
        share_chk_recv_task = asyncio.create_task(_recv_loop(n, recv))

    # Send shares of parties with id in [N-2t+1, N] to those parties.
    for i in range(big_t + 1, n):
        send(i, (to_send_t[i], to_send_2t[i]))

    # Parties with id in [N-2t+1, N] need to verify that the shares are in fact correct.
    if my_id > big_t:
        shares_to_check = await share_chk_recv_task
        shares_t, shares_2t = zip(*shares_to_check)
        response = HyperInvMessageType.ABORT

        def get_degree(p):
            for i in range(len(p))[::-1]:
                if p[i] != 0:
                    return i
            return 0

        def get_degree_and_secret(shares):
            decoder = DecoderFactory.get(eval_point)
            polys = decoder.decode(list(range(n)),
                                   transpose_lists(list(shares)))
            secrets = [p[0] for p in polys]
            degrees = [get_degree(p) for p in polys]
            return degrees, secrets

        degree_t, secret_t = get_degree_and_secret(shares_t)
        degree_2t, secret_2t = get_degree_and_secret(shares_2t)

        # Verify that the shares are in fact `t`- and `2t`-shared, and that
        # the `t` and `2t` sharings hide the same secret.
        if (all(deg == t for deg in degree_t)
                and all(deg == 2 * t for deg in degree_2t)
                and secret_t == secret_2t):
            response = HyperInvMessageType.SUCCESS

        logging.debug(
            "[%d] Degree check: %s, Secret Check: %s",
            my_id,
            all(deg == t for deg in degree_t)
            and all(deg == 2 * t for deg in degree_2t),
            secret_t == secret_2t,
        )

    # Start listening for the verification response.
    send, recv = _get_send_recv("H3")
    response_recv_task = asyncio.create_task(
        _recv_loop(n - big_t - 1, recv, big_t + 1))

    # Send the verification response.
    if my_id > big_t:
        for i in range(n):
            send(i, response)

    responses = await response_recv_task
    subscribe_recv_task.cancel()

    # If any of the parties in [T+1, N] report that the shares are inconsistent, abort.
    if responses.count(HyperInvMessageType.SUCCESS) != n - big_t - 1:
        raise HoneyBadgerMPCError(
            "Aborting because the shares were inconsistent.")

    out_t = flatten_lists([s[:big_t + 1] for s in ref_t])
    out_2t = flatten_lists([s[:big_t + 1] for s in ref_2t])

    return tuple(zip(out_t, out_2t))
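
`_recv_loop` is not shown in these examples. Judging from its call sites, `_recv_loop(n, recv)` and `_recv_loop(n - big_t - 1, recv, big_t + 1)`, it collects exactly one message from each sender in a contiguous id range and returns them ordered by sender. The sketch below is an assumption consistent with that usage:

async def _recv_loop_sketch(count, recv, start_id=0):
    # Collect one message from each sender in [start_id, start_id + count),
    # ordered by sender id regardless of arrival order.
    results = [None] * count
    for _ in range(count):
        sender, message = await recv()
        results[sender - start_id] = message
    return results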
Code example #10
async def batch_reconstruct(
    secret_shares,
    p,
    t,
    n,
    myid,
    send,
    recv,
    config=None,
    use_omega_powers=False,
    debug=False,
    degree=None,
):
    """
    args:
      secret_shares: an array of points representing shared secrets S1 - SB
      p: field modulus
      t: faults tolerated
      n: total number of nodes n >= 3t+1
      myid: id of the specific node running batch_reconstruction function
      degree: degree of polynomial to decode (defaults to t)

    output:
      the reconstructed array of B shares

    Communication takes place over two rounds,
      objects sent/received of the form ('R1', shares) or ('R2', shares),
      up to one of each for each party

    Reconstruction takes place in chunks of degree + 1 values
    """
    bench_logger = logging.LoggerAdapter(logging.getLogger("benchmark_logger"),
                                         {"node_id": myid})

    if degree is None:
        degree = t

    secret_shares = [v.value for v in secret_shares]

    # (optional) Induce faults
    if config is not None and config.induce_faults:
        logging.debug("[FAULT][BatchReconstruction] Sending random shares.")
        secret_shares = [
            random.randint(0, p - 1) for _ in range(len(secret_shares))
        ]

    # Prepare recv loops for this batch reconstruction
    subscribe_task, subscribe = subscribe_recv(recv)
    del recv  # ILC enforces this in the type system: no duplication of reads

    task_r1, recvs_r1 = recv_each_party(subscribe("R1"), n)
    data_r1 = [asyncio.create_task(recv()) for recv in recvs_r1]

    task_r2, recvs_r2 = recv_each_party(subscribe("R2"), n)
    data_r2 = [asyncio.create_task(recv()) for recv in recvs_r2]
    del subscribe  # ILC should determine we can garbage collect after this

    # Set up encoding and decoding algorithms
    fp = GF(p)
    decoding_algorithm = Algorithm.GAO if config is None else config.decoding_algorithm

    point = EvalPoint(fp, n, use_omega_powers=use_omega_powers)
    enc = EncoderFactory.get(
        point, Algorithm.FFT if use_omega_powers else Algorithm.VANDERMONDE)
    dec = DecoderFactory.get(
        point, Algorithm.FFT if use_omega_powers else Algorithm.VANDERMONDE)
    robust_dec = RobustDecoderFactory.get(t,
                                          point,
                                          algorithm=decoding_algorithm)

    # Prepare data for step 1
    round1_chunks = chunk_data(secret_shares, degree + 1)
    num_chunks = len(round1_chunks)

    # Step 1: Compute the polynomial P1, then send the elements
    start_time = time.time()

    encoded = enc.encode(round1_chunks)
    to_send = transpose_lists(encoded)
    for dest, message in enumerate(to_send):
        send(dest, ("R1", message))

    end_time = time.time()
    bench_logger.info(f"[BatchReconstruct] P1 Send: {end_time - start_time}")

    # Step 2: Attempt to reconstruct P1
    start_time = time.time()
    try:
        recons_r2 = await incremental_decode(data_r1, enc, dec, robust_dec,
                                             num_chunks, t, degree, n)
    except asyncio.CancelledError:
        # Cancel all created tasks, then propagate the cancellation;
        # otherwise `recons_r2` below would be unbound.
        for task in [task_r1, task_r2, subscribe_task, *data_r1, *data_r2]:
            task.cancel()
        raise

    if recons_r2 is None:
        logging.error("[BatchReconstruct] P1 reconstruction failed!")
        return None

    end_time = time.time()
    bench_logger.info(
        f"[BatchReconstruct] P1 Reconstruct: {end_time - start_time}")

    # Step 3: Send R2 points
    start_time = time.time()

    # Evaluate all chunks at x=0, then broadcast
    message = [chunk[0] for chunk in recons_r2]
    for dest in range(n):
        send(dest, ("R2", message))

    end_time = time.time()
    bench_logger.info(f"[BatchReconstruct] P2 Send: {end_time - start_time}")

    # Step 4: Attempt to reconstruct R2
    start_time = time.time()
    try:
        recons_p = await incremental_decode(data_r2, enc, dec, robust_dec,
                                            num_chunks, t, degree, n)
    except asyncio.CancelledError:
        # Cancel all created tasks, then propagate the cancellation;
        # otherwise `recons_p` below would be unbound.
        for task in [task_r1, task_r2, subscribe_task, *data_r1, *data_r2]:
            task.cancel()
        raise

    if recons_p is None:
        logging.error("[BatchReconstruct] P2 reconstruction failed!")
        return None

    end_time = time.time()
    bench_logger.info(
        f"[BatchReconstruct] P2 Reconstruct: {end_time - start_time}")

    # Cancel all created tasks
    for task in [task_r1, task_r2, subscribe_task, *data_r1, *data_r2]:
        task.cancel()

    result = flatten_lists(recons_p)
    assert len(result) >= len(secret_shares)

    # Get back result as GFElement type
    return list(map(fp, result[:len(secret_shares)]))
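
A handful of small list helpers recur throughout these examples. The sketches below are consistent with how they are used here; the padding behavior of `chunk_data` in particular is an assumption:

def chunk_data(data, chunk_size):
    # Split `data` into consecutive chunks of `chunk_size` elements.
    # The real helper may pad the final short chunk; that detail is
    # assumed away here.
    return [data[i:i + chunk_size] for i in range(0, len(data), chunk_size)]


def transpose_lists(lists):
    # Turn a list of rows into a list of columns.
    return [list(col) for col in zip(*lists)]


def flatten_lists(lists):
    # Concatenate a list of lists into one flat list.
    return [x for sub in lists for x in sub]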