Example #1
0
async def test_phase1(galois_field):
    """End-to-end check of powermixing phase 1.

    Each node runs `all_secrets_phase1` and must produce an input file
    containing: the field modulus, a masked random share, the difference
    (a - b), the count k, and the opened powers b**1..b**k.
    """
    field = galois_field
    n, t, k = 5, 2, 1
    pp_elements = PreProcessedElements()
    pp_elements.generate_powers(k, n, t, 1)
    pp_elements.generate_rands(k, n, t)

    async def verify_phase1(ctx, **kwargs):
        k_ = kwargs["k"]
        # b is the base of the pre-generated power tuple; open its share.
        b_ = await ctx.preproc.get_powers(ctx, 0)[0].open()
        file_prefixes = [uuid4().hex]
        # FIX: use k_ (the kwarg this verifier was given) rather than the
        # closed-over k, so the inner program depends only on its arguments.
        await pm.all_secrets_phase1(ctx, k=k_, file_prefixes=file_prefixes)
        file_name = f"{file_prefixes[0]}-{ctx.myid}.input"
        file_path = f"{pp_elements.data_directory}{file_name}"
        with open(file_path, "r") as f:
            assert int(f.readline()) == field.modulus
            # next line is a random share, which should open successfully
            a_ = await ctx.Share(int(f.readline())).open()
            assert int(f.readline()) == (a_ - b_).value
            assert int(f.readline()) == k_
            for i in range(1, k_ + 1):
                assert (await
                        ctx.Share(int(f.readline())).open()).value == b_**(i)

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(verify_phase1, k=k)
    await program_runner.join()
async def test_get_random(test_router, rust_field):
    """One random share per node must open to the same value everywhere."""
    n, t = 4, 1
    sends, recvs, _ = test_router(n)

    with ExitStack() as stack:
        generators = [None] * n
        pending = [None] * n
        for node_id in range(n):
            gen = RandomGenerator(n, t, node_id, sends[node_id],
                                  recvs[node_id], 1)
            generators[node_id] = gen
            stack.enter_context(gen)
            pending[node_id] = asyncio.create_task(gen.get())

        shares = await asyncio.gather(*pending)
        assert len(shares) == n

    async def _prog(context):
        # Each node opens its own share; all openings must agree.
        return await context.Share(shares[context.myid]).open()

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    rands = await program_runner.join()

    assert len(rands) == n
    # Verify that all nodes have the same values
    assert rands.count(rands[0]) == n
Example #3
0
async def test_open_future_shares():
    """Exercise arithmetic on shares whose operands are still unopened, and
    check that the derived products/sums open to the expected values."""
    n, t = 4, 1
    pp_elements = PreProcessedElements()
    pp_elements.generate_rands(1000, n, t)
    pp_elements.generate_triples(1000, n, t)

    async def _prog(context):
        # Draw two random shares and open them concurrently.
        e1_, e2_ = [context.preproc.get_rand(context) for _ in range(2)]
        e1, e2 = await asyncio.gather(*[e1_.open(), e2_.open()],
                                      return_exceptions=True)

        # Build derived shares from the (share-typed) operands; these
        # multiplications use the BeaverMultiply mixin configured below.
        s_prod_f = e1_ * e2_
        s_prod_f2 = s_prod_f * e1_
        final_prod = s_prod_f2 + e1_ + e2_
        final_prod_2 = final_prod * e1_
        # Deliberately wrap the coroutine returned by open() in a Share;
        # opening the wrapper must yield the same value as the inner open.
        wrapped_final_prod_2 = context.Share(final_prod_2.open())

        assert await s_prod_f2.open() == e1 * e1 * e2
        assert await final_prod.open() == e1 * e1 * e2 + e1 + e2
        assert await final_prod_2.open() == (e1 * e1 * e2 + e1 + e2) * e1
        assert await wrapped_final_prod_2.open() == await final_prod_2.open()

    program_runner = TaskProgramRunner(
        n, t, {MixinConstants.MultiplyShare: BeaverMultiply()})
    program_runner.add(_prog)
    await program_runner.join()
async def test_get_randoms(test_router, rust_field, n, t, b):
    """Each node draws `b` random shares; every node's batch of opened
    values must be identical."""
    sends, recvs, _ = test_router(n)

    with ExitStack() as stack:
        generators = [None] * n
        pending = []
        for i in range(n):
            # k => each node has a different batch size
            # for the number of values which it AVSSes
            batch_size = randint(1, 10)
            gen = RandomGenerator(n, t, i, sends[i], recvs[i], batch_size)
            generators[i] = gen
            stack.enter_context(gen)
            pending.extend(
                asyncio.create_task(gen.get()) for _ in range(b))

        shares = await asyncio.gather(*pending)
        assert len(shares) == n * b

    async def _prog(context):
        start = context.myid * b
        opened = await context.ShareArray(shares[start:start + b]).open()
        return tuple(opened)

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    rands = await program_runner.join()

    assert len(rands) == n
    # Verify that all nodes have the same values
    assert rands.count(rands[0]) == n
async def test_get_triple(test_router, rust_field):
    """Every node's generated triple must open so that a * b == ab, and
    all nodes must agree on the opened values."""
    n, t = 4, 1
    sends, recvs, _ = test_router(n)

    with ExitStack() as stack:
        generators = [None] * n
        pending = [None] * n
        for node_id in range(n):
            gen = TripleGenerator(n, t, node_id, sends[node_id],
                                  recvs[node_id], 1)
            generators[node_id] = gen
            stack.enter_context(gen)
            pending[node_id] = asyncio.create_task(gen.get())

        shares = await asyncio.gather(*pending)
        assert len(shares) == n

    async def _prog(context):
        opened = await context.ShareArray(list(shares[context.myid])).open()
        a, b, ab = opened
        assert a * b == ab
        return tuple((a, b, ab))

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    triples = await program_runner.join()

    assert len(triples) == n
    # Verify that all nodes have the same values
    assert triples.count(triples[0]) == n
Example #6
0
async def test_butterfly_network():
    """Shuffling through the butterfly network must preserve the multiset
    of input values (compared after sorting both sides)."""
    n, t, k, delta = 3, 1, 32, -9999
    pp_elements = PreProcessedElements()
    pp_elements.generate_rands(1000, n, t)
    pp_elements.generate_one_minus_ones(1000, n, t)
    pp_elements.generate_triples(1500, n, t)

    async def verify_output(ctx, **kwargs):
        k, delta = kwargs["k"], kwargs["delta"]
        inputs = [ctx.preproc.get_rand(ctx) for _ in range(k)]
        opened_inputs = await ctx.ShareArray(inputs).open()
        sorted_input = sorted(opened_inputs, key=lambda x: x.value)

        share_arr = await butterfly.butterfly_network_helper(
            ctx, k=k, delta=delta, inputs=inputs)
        outputs = await share_arr.open()

        assert len(sorted_input) == len(outputs)
        sorted_output = sorted(outputs, key=lambda x: x.value)
        for expected, actual in zip(sorted_input, sorted_output):
            assert expected == actual

    program_runner = TaskProgramRunner(
        n, t, {MixinConstants.MultiplyShareArray: BeaverMultiplyArrays()})
    program_runner.add(verify_output, k=k, delta=delta)
    await program_runner.join()
Example #7
0
    async def _test_runner(prog, n=4, t=1, to_generate=(), k=1000, mixins=()):
        """Preprocess `k` elements of each kind in `to_generate`, then run
        `prog` on an `n`-node network tolerating `t` faults with the given
        `mixins`, returning the per-node results.

        FIX: the defaults were mutable lists (`[]`), a classic shared-
        mutable-default pitfall; empty tuples are equivalent here since the
        arguments are only read, and callers may still pass lists.
        """
        _preprocess(n, t, k, to_generate)

        config = _build_config(mixins)
        program_runner = TaskProgramRunner(n, t, config)
        program_runner.add(prog)

        return await program_runner.join()
Example #8
0
async def tutorial_fixedpoint():
    """Run the fixed-point tutorial program on a 4-node test network and
    return the per-node results."""
    n, t = 4, 1
    pp = FakePreProcessedElements()
    pp.generate_zeros(100, n, t)
    pp.generate_triples(1000, n, t)
    pp.generate_bits(1000, n, t)
    runner = TaskProgramRunner(n, t, config)
    runner.add(_prog)
    return await runner.join()
Example #9
0
async def tutorial_1():
    """Run the first tutorial program and return the per-node results."""
    # Create a test network of 4 nodes (no sockets, just asyncio tasks)
    n, t = 4, 1
    pp = FakePreProcessedElements()
    pp.generate_zeros(100, n, t)
    pp.generate_triples(100, n, t)
    pp.generate_bits(100, n, t)
    runner = TaskProgramRunner(n, t, config)
    runner.add(prog)
    return await runner.join()
Example #10
0
async def prog():
    """Preprocess a large batch of elements and run the LAESA test program,
    returning the per-node results."""
    n, t = 4, 1
    pp = FakePreProcessedElements()
    pp.generate_zeros(1000, n, t)
    pp.generate_triples(120000, n, t)
    pp.generate_share_bits(1000, n, t)
    pp.generate_bits(3000, n, t)
    pp.generate_rands(10000, n, t)
    runner = TaskProgramRunner(n, t, config)
    runner.add(laesa_test_1)
    return await runner.join()
Example #11
0
    def _benchmark_runner(prog, n=4, t=1, to_generate=(), k=1000, mixins=()):
        """Benchmark `prog` on an `n`-node network after preprocessing `k`
        elements of each kind in `to_generate`.

        FIX: the defaults were mutable lists (`[]`), a shared-mutable-default
        pitfall; empty tuples are equivalent since the arguments are only
        read, and callers may still pass lists.
        """
        pp_elements = PreProcessedElements()
        _preprocess(pp_elements, n, t, k, to_generate)

        config = _build_config(mixins)
        program_runner = TaskProgramRunner(n, t, config)
        program_runner.add(prog)
        loop = asyncio.get_event_loop()

        def _work():
            # Each benchmark iteration drives the runner to completion.
            loop.run_until_complete(program_runner.join())

        benchmark(_work)
Example #12
0
async def test_get_share():
    """A preprocessed share of a known secret must open back to it."""
    n, t = 4, 1
    secret = 41
    pp_elements = PreProcessedElements()
    sid = pp_elements.generate_share(n, t, secret)

    async def _prog(ctx):
        share = ctx.preproc.get_share(ctx, sid)
        assert await share.open() == secret

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    await program_runner.join()
Example #13
0
async def test_get_zero():
    """Preprocessed zero-shares must always open to 0."""
    n, t = 4, 1
    num_zeros = 2
    pp_elements = PreProcessedElements()
    pp_elements.generate_zeros(1000, n, t)

    async def _prog(ctx):
        for _ in range(num_zeros):
            assert await ctx.preproc.get_zero(ctx).open() == 0

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    await program_runner.join()
Example #14
0
async def test_get_triple():
    """Opened Beaver triples must satisfy a * b == ab."""
    n, t = 4, 1
    num_triples = 2
    pp_elements = PreProcessedElements()
    pp_elements.generate_triples(1000, n, t)

    async def _prog(ctx):
        for _ in range(num_triples):
            triple = ctx.preproc.get_triples(ctx)
            a, b, ab = [await share.open() for share in triple]
            assert a * b == ab

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    await program_runner.join()
Example #15
0
async def test_get_rand():
    """Drawing the expected number of random shares must not raise."""
    n, t = 4, 1
    num_rands = 2
    pp_elements = PreProcessedElements()
    pp_elements.generate_rands(1000, n, t)

    async def _prog(ctx):
        # Nothing to assert here, just check if the
        # required number of rands are generated
        for _ in range(num_rands):
            ctx.preproc.get_rand(ctx)

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    await program_runner.join()
Example #16
0
async def test_get_share_bits():
    """The bit-decomposition of a share must reconstruct its opened value."""
    n, t = 4, 1
    pp_elements = PreProcessedElements()
    pp_elements.generate_share_bits(1, n, t)

    async def _prog(ctx):
        share, bits = ctx.preproc.get_share_bits(ctx)
        opened_share = await share.open()
        opened_bits = await asyncio.gather(*[b.open() for b in bits])
        # Bits are little-endian: reverse before joining into a binary string.
        bit_string = "".join(str(b.value) for b in reversed(opened_bits))
        assert int(bit_string, 2) == opened_share.value

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    await program_runner.join()
Example #17
0
async def test_get_double_share():
    """Double shares carry the same secret at degree t and degree 2t."""
    n, t = 9, 2
    pp_elements = PreProcessedElements()
    pp_elements.generate_double_shares(1000, n, t)

    async def _prog(ctx):
        r_t, r_2t = ctx.preproc.get_double_shares(ctx)
        # The two shares must carry the expected polynomial degrees.
        assert r_t.t == ctx.t
        assert r_2t.t == 2 * ctx.t
        await r_t.open()
        await r_2t.open()
        # Both degrees must reconstruct to the same secret.
        assert await r_t.open() == await r_2t.open()

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    await program_runner.join()
Example #18
0
async def test_get_bit():
    """Preprocessed bit shares must open to 0 or 1."""
    n, t = 4, 1
    num_bits = 20
    pp_elements = PreProcessedElements()
    pp_elements.generate_bits(1000, n, t)

    async def _prog(ctx):
        bit_shares = [ctx.preproc.get_bit(ctx) for _ in range(num_bits)]
        opened = await ctx.ShareArray(bit_shares).open()
        for bit in opened:
            assert bit == 0 or bit == 1

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    await program_runner.join()
Example #19
0
async def test_get_cube():
    """Opened cube tuples must satisfy a1**2 == a2 and a1**3 == a3."""
    n, t = 4, 1
    num_cubes = 2
    pp_elements = PreProcessedElements()
    pp_elements.generate_cubes(1000, n, t)

    async def _prog(ctx):
        for _ in range(num_cubes):
            cube = ctx.preproc.get_cubes(ctx)
            a1, a2, a3 = [await share.open() for share in cube]
            assert a1 * a1 == a2
            assert a1 * a2 == a3

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    await program_runner.join()
Example #20
0
async def test_get_powers():
    """Each power tuple must open to consecutive powers of its base:
    powers[j] opens to x**(j+1) where x = powers[0]."""
    n, t = 4, 1
    pp_elements = PreProcessedElements()
    nums, num_powers = 2, 3

    pp_elements.generate_powers(num_powers, n, t, nums)

    async def _prog(ctx):
        for i in range(nums):
            powers = ctx.preproc.get_powers(ctx, i)
            x = await powers[0].open()
            # FIX: the inner loop previously reused `i`, shadowing the outer
            # tuple index; use a distinct variable for the power exponent.
            for j, power in enumerate(powers[1:]):
                assert await power.open() == pow(x, j + 2)

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    await program_runner.join()
Example #21
0
async def test_open_shares():
    """Open many zero-shares sequentially; every opening must yield 0 on
    every node."""
    n, t = 3, 1
    number_of_secrets = 100
    pp_elements = PreProcessedElements()
    pp_elements.generate_zeros(1000, n, t)

    async def _prog(context):
        opened = []
        for _ in range(number_of_secrets):
            value = await context.preproc.get_zero(context).open()
            assert value == 0
            opened.append(value)
        print("[%d] Finished" % (context.myid, ))
        return opened

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    results = await program_runner.join()
    assert len(results) == n
    assert all(len(secrets) == number_of_secrets for secrets in results)
    assert all(secret == 0 for secrets in results for secret in secrets)
Example #22
0
async def test_hbavss_light_share_open(test_router):
    """A value AVSSed by a random dealer must reconstruct to itself when
    the resulting shares are opened under MPC."""
    t = 2
    n = 3 * t + 1

    g, h, pks, sks = get_avss_params(n, t)
    sends, recvs, _ = test_router(n)
    crs = [g, h]

    value = ZR.random()
    dealer_id = randint(0, n - 1)
    avss_tasks = [None] * n
    hbavss_list = [None] * n

    with ExitStack() as stack:
        for i in range(n):
            hbavss = HbAvssLight(pks, sks[i], crs, n, t, i, sends[i], recvs[i])
            hbavss_list[i] = hbavss
            stack.enter_context(hbavss)
            # The dealer contributes `value`; everyone else names the dealer.
            if i == dealer_id:
                coro = hbavss.avss(0, value=value)
            else:
                coro = hbavss.avss(0, dealer_id=dealer_id)
            avss_tasks[i] = asyncio.create_task(coro)
        outputs = await asyncio.gather(
            *[hbavss_list[i].output_queue.get() for i in range(n)])
        for task in avss_tasks:
            task.cancel()
    # The third element of each output tuple is that node's share.
    shares = [item[2] for item in outputs]

    async def _prog(context):
        share_value = context.field(shares[context.myid])
        assert await context.Share(share_value).open() == value

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    await program_runner.join()
Example #23
0
async def test_asynchronous_mixing():
    """Run the full asynchronous power-mixing pipeline and verify the mixed
    output is a permutation of each node's opened input shares."""
    import asyncio
    import apps.asynchromix.powermixing as pm
    from honeybadgermpc.mpc import TaskProgramRunner

    n, t, k = 3, 1, 4
    pp_elements = PreProcessedElements()
    pp_elements.generate_powers(k, n, t, k)
    pp_elements.generate_rands(1000, n, t)

    async def verify_output(context, **kwargs):
        result, input_shares = kwargs["result"], kwargs["input_shares"]
        my_shares = input_shares[context.myid]
        assert len(result) == len(my_shares)

        # Open this node's input shares; `sh.v` appears to be the raw share
        # value -- TODO confirm against pm.async_mixing's return type.
        inputs = await asyncio.gather(
            *[context.Share(sh.v, t).open() for sh in my_shares])
        # Mixing must preserve the multiset of values: sorted opened inputs
        # must equal the sorted mixed result.
        assert sorted(map(lambda x: x.value, inputs)) == sorted(result)

    result, input_shares = await pm.async_mixing(n, t, k)
    program_runner = TaskProgramRunner(n, t)
    program_runner.add(verify_output, result=result, input_shares=input_shares)
    await program_runner.join()
async def test_get_triples(test_router, rust_field, n, t, b):
    """Each node draws `b` triples; all nodes must agree on the opened
    values and every triple must satisfy p * q == pq."""
    sends, recvs, _ = test_router(n)

    with ExitStack() as stack:
        triple_generators = [None] * n
        tasks = [None] * n * b
        for i in range(n):
            # k => each node has a different batch size
            # for the number of values which it AVSSes
            k = randint(50, 100)
            triple_generators[i] = TripleGenerator(n, t, i, sends[i], recvs[i],
                                                   k)
            stack.enter_context(triple_generators[i])
            for j in range(b):
                tasks[b * i + j] = asyncio.create_task(
                    triple_generators[i].get())

        shares = await asyncio.gather(*tasks)
        assert len(shares) == n * b

    async def _prog(context):
        s = context.myid * b
        # Flatten this node's b triples into one share array of length 3b.
        triple_shares = sum(map(list, shares[s:s + b]), [])
        assert len(triple_shares) == b * 3
        opened_shares = await context.ShareArray(triple_shares).open()
        return tuple(opened_shares)

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    triples = await program_runner.join()

    assert len(triples) == n
    # Verify that all nodes have the same values
    assert triples.count(triples[0]) == n
    for i in range(0, len(triples[0]), 3):
        p, q, pq = triples[0][i:i + 3]
        # FIX: the original asserted the tautology `p * q == p * q`, which
        # never checked the opened product; compare against pq instead.
        assert p * q == pq
Example #25
0
async def async_mixing(n, t, k):
    """Run the three-phase asynchronous power-mixing protocol.

    Phase 1 (MPC): nodes run `all_secrets_phase1` over `k` secrets, keyed
    by fresh file prefixes. Phase 2 (local): `phase2` is run once per
    (node, prefix) pair through a bounded task pool. Phase 3 (MPC):
    `phase3` opens the power sums, which the equation solver inverts.

    Returns a tuple ``(result, rands)`` where `result` is the solver
    output and `rands` is whatever phase 1 returned per node.
    """
    from .solver.solver import solve
    from honeybadgermpc.utils.task_pool import TaskPool

    pr1 = TaskProgramRunner(n, t)
    # One fresh prefix per secret, plus a run id shared across all phases.
    file_prefixes = [uuid.uuid4().hex for _ in range(k)]
    run_id = uuid.uuid4().hex

    pr1.add(all_secrets_phase1, k=k, file_prefixes=file_prefixes)
    rands = await pr1.join()

    # Cap phase-2 concurrency at 256 simultaneous tasks.
    pool = TaskPool(256)
    for node_id in range(n):
        for i in range(k):
            pool.submit(phase2(node_id, run_id, file_prefixes[i]))
    await pool.close()

    pr2 = TaskProgramRunner(n, t)
    pr2.add(phase3, k=k, run_id=run_id)
    # All nodes open the same power sums; node 0's copy suffices.
    power_sums = (await pr2.join())[0]
    logging.info("Shares from C++ phase opened.")
    result = solve([s.value for s in power_sums])
    logging.info("Equation solver completed.")
    return result, rands
async def test_cant_multiply_shares_from_different_contexts():
    """Multiplying shares that originate from two different runner
    contexts must raise TypeError."""
    from honeybadgermpc.mpc import TaskProgramRunner
    import asyncio

    n, t, k = 9, 2, 2000

    pp_elements = PreProcessedElements()
    pp_elements.generate_double_shares(k, n, t)
    pp_elements.generate_rands(k, n, t)

    async def _prog(context):
        share = context.Share(1)
        return share

    test_runner_1 = TaskProgramRunner(n, t)
    test_runner_2 = TaskProgramRunner(n, t)

    test_runner_1.add(_prog)
    test_runner_2.add(_prog)

    s1, s2 = await asyncio.gather(test_runner_1.join(), test_runner_2.join())

    async def _prog2(context):
        with raises(TypeError):
            await s1[0] * s2[0]

    # FIX: the mixin config must be a dict mapping the mixin name to its
    # instance (as at the other TaskProgramRunner call sites in this file);
    # the original used a comma instead of a colon, building a set.
    test_runner_3 = TaskProgramRunner(
        n, t, {DoubleSharingMultiply.name: DoubleSharingMultiply()})
    test_runner_3.add(_prog2)
    await test_runner_3.join()
Example #27
0
async def test_hbavss_light_parallel_share_array_open(test_router):
    """AVSS `k` values in parallel from one random dealer, then check each
    node's share array opens to exactly the dealt values."""
    t = 2
    n = 3 * t + 1
    k = 4

    g, h, pks, sks = get_avss_params(n, t)
    sends, recvs, _ = test_router(n)
    crs = [g, h]

    values = [int(ZR.random()) for _ in range(k)]
    dealer_id = randint(0, n - 1)

    with ExitStack() as stack:
        avss_tasks = [None] * n
        hbavss_list = [None] * n
        for i in range(n):
            hbavss = HbAvssLight(pks, sks[i], crs, n, t, i, sends[i], recvs[i])
            hbavss_list[i] = hbavss
            stack.enter_context(hbavss)

            # Only the dealer supplies the values; other nodes name the
            # dealer and receive shares from it.
            if i == dealer_id:
                v, d = values, None
            else:
                v, d = None, dealer_id

            avss_tasks[i] = asyncio.create_task(
                hbavss.avss_parallel(0, k, v, d))
            avss_tasks[i].add_done_callback(print_exception_callback)

        # Collect one output per node for each of the k parallel rounds.
        outputs = [None] * k
        for j in range(k):
            outputs[j] = await asyncio.gather(
                *[hbavss_list[i].output_queue.get() for i in range(n)])

        for task in avss_tasks:
            task.cancel()
    # Sort the outputs incase they're out of order
    # (output[1] appears to carry the round index -- TODO confirm layout).
    round_outputs = [[[] for __ in range(n)] for _ in range(k)]
    for i in range(k):
        for j in range(n):
            round_outputs[outputs[i][j][1]][j] = outputs[i][j]
    # output[2] is the node's share for that round; group shares per node.
    shares = [[] for _ in range(n)]
    for i in range(k):
        round_output = round_outputs[i]
        for j in range(len(round_output)):
            shares[j].append(round_output[j][2])

    async def _prog(context):
        share_values = list(map(context.field, shares[context.myid]))
        opened_shares = set(await context.ShareArray(share_values).open())

        # The set of opened share should have exactly `k` values
        assert len(opened_shares) == k

        # All the values in the set of opened shares should be from the initial values
        for i in opened_shares:
            assert i.value in values

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    await program_runner.join()