示例#1
0
async def main_loop(w3):
    """Deploy the AsynchromixCoordinator contract and run a local mixing demo.

    Compiles and deploys the coordinator contract through *w3*, starts
    n=4 Asynchromix servers wired together by an in-memory router plus one
    client, and waits for all of them to finish.

    Args:
        w3: connected web3 instance; w3.eth.accounts funds the transactions.

    Raises:
        ValueError: if the deployed address holds no code (the constructor
            likely ran out of gas).
    """
    pp_elements = PreProcessedElements()
    # deletes sharedata/ if present
    pp_elements.clear_preprocessing()

    # Step 1.
    # Create the coordinator contract and web3 interface to it
    compiled_sol = compile_source(
        open(os.path.join(os.path.dirname(__file__),
                          "asynchromix.sol")).read())  # Compiled source code
    contract_interface = compiled_sol["<stdin>:AsynchromixCoordinator"]
    contract_class = w3.eth.contract(abi=contract_interface["abi"],
                                     bytecode=contract_interface["bin"])
    # tx_hash = contract_class.constructor(w3.eth.accounts[:7],2).transact(
    #   {'from':w3.eth.accounts[0]})  # n=7, t=2

    tx_hash = contract_class.constructor(w3.eth.accounts[:4], 1).transact(
        {"from": w3.eth.accounts[0]})  # n=4, t=1

    # Get tx receipt to get contract address
    tx_receipt = await wait_for_receipt(w3, tx_hash)
    contract_address = tx_receipt["contractAddress"]

    if w3.eth.getCode(contract_address) == b"":
        logging.critical(
            "code was empty 0x, constructor may have run out of gas")
        raise ValueError

    # Contract instance in concise mode
    abi = contract_interface["abi"]
    contract = w3.eth.contract(address=contract_address, abi=abi)
    contract_concise = ConciseContract(contract)

    # Call read only methods to check
    n = contract_concise.n()

    # Step 2: Create the servers
    router = SimpleRouter(n)
    sends, recvs = router.sends, router.recvs
    servers = [
        AsynchromixServer("sid", i, sends[i], recvs[i], w3, contract)
        for i in range(n)
    ]

    # Step 3. Create the client
    async def req_mask(i, idx):
        # client requests input mask {idx} from server {i}
        return servers[i]._inputmasks[idx]

    client = AsynchromixClient("sid", "client", None, None, w3, contract,
                               req_mask)

    # Step 4. Wait for conclusion
    for i, server in enumerate(servers):
        await server.join()
    await client.join()
示例#2
0
File: fixtures.py  Project: tyurek/hbACSS
    def __init__(self):
        """Create an isolated preprocessing directory for one test run.

        A fresh temporary directory is created under the shared-data root
        and installed as the default for PreProcessedElements, so parallel
        tests do not clobber each other's preprocessed element files.
        """
        from honeybadgermpc.preprocessing import PreProcessedElements
        from honeybadgermpc.preprocessing import PreProcessingConstants as Constants
        from honeybadgermpc.progs.mixins.base import MixinBase

        makedirs(Constants.SHARED_DATA_DIR.value, exist_ok=True)
        # Trailing slash: consumers concatenate file names onto this prefix.
        self.test_data_dir = f"{mkdtemp(dir=Constants.SHARED_DATA_DIR.value)}/"
        PreProcessedElements.DEFAULT_DIRECTORY = self.test_data_dir

        # Drop any cached instance so the new directory takes effect.
        PreProcessedElements.reset_cache()
        self.elements = PreProcessedElements(data_directory=self.test_data_dir)
        MixinBase.pp_elements = self.elements
async def test_degree_reduction_share(galois_field, test_runner):
    """A share of degree 2t reduces to degree t without changing its value."""
    num_parties, threshold = 7, 2
    preproc = PreProcessedElements()
    expected = galois_field.random().value
    share_id = preproc.generate_share(num_parties, 2 * threshold, expected)

    async def _prog(ctx):
        high_degree_share = ctx.preproc.get_share(ctx, share_id, 2 * threshold)
        reduced = await DoubleSharingMultiply.reduce_degree_share(
            ctx, high_degree_share)
        assert expected == await reduced.open()

    await run_test_program(_prog, test_runner, num_parties, threshold)
示例#4
0
async def test_get_share():
    """A generated share with a known value opens back to that value."""
    num_parties, threshold = 4, 1
    secret = 41
    preproc = PreProcessedElements()
    share_id = preproc.generate_share(num_parties, threshold, secret)

    async def _prog(ctx):
        share = ctx.preproc.get_share(ctx, share_id)
        opened = await share.open()
        assert opened == secret

    runner = TaskProgramRunner(num_parties, threshold)
    runner.add(_prog)
    await runner.join()
示例#5
0
async def test_get_zero():
    """Preprocessed zero shares open to 0."""
    num_parties, threshold = 4, 1
    zeros_to_check = 2
    preproc = PreProcessedElements()
    preproc.generate_zeros(1000, num_parties, threshold)

    async def _prog(ctx):
        for _ in range(zeros_to_check):
            zero_share = ctx.preproc.get_zero(ctx)
            assert await zero_share.open() == 0

    runner = TaskProgramRunner(num_parties, threshold)
    runner.add(_prog)
    await runner.join()
示例#6
0
async def test_butterfly_network():
    """Shuffling through the butterfly network permutes the inputs exactly."""
    n, t, k, delta = 3, 1, 32, -9999
    preproc = PreProcessedElements()
    preproc.generate_rands(1000, n, t)
    preproc.generate_one_minus_ones(1000, n, t)
    preproc.generate_triples(1500, n, t)

    async def verify_output(ctx, **kwargs):
        k, delta = kwargs["k"], kwargs["delta"]
        inputs = [ctx.preproc.get_rand(ctx) for _ in range(k)]
        opened_inputs = await ctx.ShareArray(inputs).open()
        sorted_input = sorted(opened_inputs, key=lambda elem: elem.value)

        share_arr = await butterfly.butterfly_network_helper(
            ctx, k=k, delta=delta, inputs=inputs)
        outputs = await share_arr.open()

        # The network must emit a permutation of its inputs:
        # sorting both sides makes them comparable element-wise.
        assert len(sorted_input) == len(outputs)
        sorted_output = sorted(outputs, key=lambda elem: elem.value)
        for left, right in zip(sorted_input, sorted_output):
            assert left == right

    config = {MixinConstants.MultiplyShareArray: BeaverMultiplyArrays()}
    runner = TaskProgramRunner(n, t, config)
    runner.add(verify_output, k=k, delta=delta)
    await runner.join()
示例#7
0
async def test_get_triple():
    """Preprocessed Beaver triples satisfy a * b == ab."""
    num_parties, threshold = 4, 1
    triples_to_check = 2
    preproc = PreProcessedElements()
    preproc.generate_triples(1000, num_parties, threshold)

    async def _prog(ctx):
        for _ in range(triples_to_check):
            a_sh, b_sh, ab_sh = ctx.preproc.get_triples(ctx)
            a = await a_sh.open()
            b = await b_sh.open()
            ab = await ab_sh.open()
            assert a * b == ab

    runner = TaskProgramRunner(num_parties, threshold)
    runner.add(_prog)
    await runner.join()
示例#8
0
async def test_get_rand():
    """Enough random shares can be drawn from the preprocessed pool."""
    num_parties, threshold = 4, 1
    rands_to_draw = 2
    preproc = PreProcessedElements()
    preproc.generate_rands(1000, num_parties, threshold)

    async def _prog(ctx):
        # Nothing to assert: success means the required number of
        # random shares could be fetched without exhausting the pool.
        for _ in range(rands_to_draw):
            ctx.preproc.get_rand(ctx)

    runner = TaskProgramRunner(num_parties, threshold)
    runner.add(_prog)
    await runner.join()
示例#9
0
async def test_get_share_bits():
    """A share's preprocessed bit decomposition reassembles to its value."""
    num_parties, threshold = 4, 1
    preproc = PreProcessedElements()
    preproc.generate_share_bits(1, num_parties, threshold)

    async def _prog(ctx):
        share, bits = ctx.preproc.get_share_bits(ctx)
        opened_share = await share.open()
        opened_bits = await asyncio.gather(*[bit.open() for bit in bits])
        # Bits come least-significant first: reverse before reading as binary.
        binary = "".join(str(bit.value) for bit in reversed(opened_bits))
        assert int(binary, 2) == opened_share.value

    runner = TaskProgramRunner(num_parties, threshold)
    runner.add(_prog)
    await runner.join()
示例#10
0
async def test_get_bit():
    """Preprocessed bit shares open to either 0 or 1."""
    num_parties, threshold = 4, 1
    bits_to_check = 20
    preproc = PreProcessedElements()
    preproc.generate_bits(1000, num_parties, threshold)

    async def _prog(ctx):
        bit_shares = [ctx.preproc.get_bit(ctx) for _ in range(bits_to_check)]
        opened = await ctx.ShareArray(bit_shares).open()
        for bit in opened:
            assert bit == 0 or bit == 1

    runner = TaskProgramRunner(num_parties, threshold)
    runner.add(_prog)
    await runner.join()
示例#11
0
async def test_get_double_share():
    """Paired shares of one random value at degrees t and 2t open equal.

    Fix: the original opened each share twice (once discarded, once in the
    final assert); each is now opened exactly once.
    """
    n, t = 9, 2
    pp_elements = PreProcessedElements()
    pp_elements.generate_double_shares(1000, n, t)

    async def _prog(ctx):
        r_t_sh, r_2t_sh = ctx.preproc.get_double_shares(ctx)
        # The low share has degree t, the high share degree 2t.
        assert r_t_sh.t == ctx.t
        assert r_2t_sh.t == ctx.t * 2
        # Open each share once; both must reveal the same underlying value.
        r_t = await r_t_sh.open()
        r_2t = await r_2t_sh.open()
        assert r_t == r_2t

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    await program_runner.join()
示例#12
0
async def test_get_cube():
    """Preprocessed cube tuples satisfy a^2 == second and a^3 == third."""
    num_parties, threshold = 4, 1
    cubes_to_check = 2
    preproc = PreProcessedElements()
    preproc.generate_cubes(1000, num_parties, threshold)

    async def _prog(ctx):
        for _ in range(cubes_to_check):
            a_sh, a2_sh, a3_sh = ctx.preproc.get_cubes(ctx)
            a = await a_sh.open()
            a_squared = await a2_sh.open()
            a_cubed = await a3_sh.open()
            assert a * a == a_squared
            assert a * a_squared == a_cubed

    runner = TaskProgramRunner(num_parties, threshold)
    runner.add(_prog)
    await runner.join()
示例#13
0
async def test_get_powers():
    """Each preprocessed power sequence holds x, x^2, ..., x^num_powers.

    Fix: the inner enumerate index shadowed the outer sequence index `i`,
    which was confusing and bug-prone; the inner index is now `j`.
    """
    n, t = 4, 1
    pp_elements = PreProcessedElements()
    nums, num_powers = 2, 3

    pp_elements.generate_powers(num_powers, n, t, nums)

    async def _prog(ctx):
        for i in range(nums):
            powers = ctx.preproc.get_powers(ctx, i)
            # The first entry opens to the base x of this sequence.
            x = await powers[0].open()
            # powers[1:] are x^2, x^3, ... (offset by 2).
            for j, power in enumerate(powers[1:]):
                assert await power.open() == pow(x, j + 2)

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(_prog)
    await program_runner.join()
示例#14
0
async def test_less_than(begin, end, test_runner):
    """Share comparison (<) agrees with plaintext comparison."""
    pp_elements = PreProcessedElements()
    pp_elements.generate_share_bits(50, n, t)
    lhs_values = [randint(begin, end) for _ in range(3)]
    # One strictly smaller, one equal, one strictly larger counterpart.
    rhs_values = [lhs_values[0] - DIFF, lhs_values[1], lhs_values[2] + DIFF]

    async def _prog(context):
        lhs_shares = [context.Share(v) for v in lhs_values]
        rhs_shares = [context.Share(v) for v in rhs_values]

        comparisons = [(x < y).open() for x, y in zip(lhs_shares, rhs_shares)]
        results = await gather(*comparisons)

        for res, a, b in zip(results, lhs_values, rhs_values):
            assert bool(res) == (a < b)

    await test_runner(_prog, n, t, PREPROCESSING, 2500,
                      STANDARD_ARITHMETIC_MIXINS)
示例#15
0
def get_bit_dec_setup_commands(s3manager, instance_ids):
    """Generate bit-decomposition preprocessing files, upload them to S3,
    and build the per-instance download commands.

    Fixes: removed the unused ``Constants`` import alias, the dead
    ``setup_commands = []`` / ``total_time = 0`` locals, and an f-string
    with no placeholders.

    Args:
        s3manager: uploader exposing upload_files(paths) -> urls.
        instance_ids: AWS instance ids; one command list is built per id.

    Returns:
        list of [instance_id, [shell commands]] pairs.
    """
    from honeybadgermpc.preprocessing import PreProcessedElements

    n, t = AwsConfig.TOTAL_VM_COUNT, AwsConfig.MPC_CONFIG.T

    logging.info("Starting to create preprocessing files.")
    stime = time()
    pp_elements = PreProcessedElements()
    pp_elements.generate_triples(150000, n, t)
    pp_elements.generate_rands(66000, n, t)
    pp_elements.generate_bits(10000, n, t)
    pp_elements.generate_zeros(200, n, t)
    logging.info(f"Preprocessing files created in {time()-stime}")

    logging.info("Uploading input files to AWS S3.")
    stime = time()

    triple_urls = s3manager.upload_files(
        [build_file_name_triple(n, t, i) for i in range(n)])
    rands_urls = s3manager.upload_files(
        [build_file_name_rand(n, t, i) for i in range(n)])
    zeros_urls = s3manager.upload_files(
        [build_file_name_zero(n, t, i) for i in range(n)])
    bits_urls = s3manager.upload_files(
        [build_file_name_bit(n, t, i) for i in range(n)])
    logging.info(f"Inputs successfully uploaded in {time()-stime} seconds.")

    setup_commands = [[
        instance_id,
        [
            "sudo docker pull %s" % (AwsConfig.DOCKER_IMAGE_PATH),
            "mkdir -p sharedata",
            "cd sharedata; curl -sSO %s" % (triple_urls[i]),
            "cd sharedata; curl -sSO %s" % (rands_urls[i]),
            "cd sharedata; curl -sSO %s" % (zeros_urls[i]),
            "cd sharedata; curl -sSO %s" % (bits_urls[i]),
            "mkdir -p benchmark-logs",
        ],
    ] for i, instance_id in enumerate(instance_ids)]
    return setup_commands
示例#16
0
async def test_open_shares():
    """Every party opens many zero-shares; all of them open to 0."""
    n, t = 3, 1
    number_of_secrets = 100
    preproc = PreProcessedElements()
    preproc.generate_zeros(1000, n, t)

    async def _prog(context):
        secrets = []
        for _ in range(number_of_secrets):
            opened = await context.preproc.get_zero(context).open()
            assert opened == 0
            secrets.append(opened)
        print("[%d] Finished" % (context.myid, ))
        return secrets

    runner = TaskProgramRunner(n, t)
    runner.add(_prog)
    results = await runner.join()
    # One result list per party, each of the expected length and all zeros.
    assert len(results) == n
    assert all(len(secrets) == number_of_secrets for secrets in results)
    assert all(secret == 0 for secrets in results for secret in secrets)
示例#17
0
async def tutorial_fixedpoint():
    """Run the fixed-point tutorial program on a 4-party test network."""
    n, t = 4, 1
    pp = FakePreProcessedElements()
    # The tutorial consumes zeros, triples and bits from preprocessing.
    for generate, count in ((pp.generate_zeros, 100),
                            (pp.generate_triples, 1000),
                            (pp.generate_bits, 1000)):
        generate(count, n, t)
    runner = TaskProgramRunner(n, t, config)
    runner.add(_prog)
    return await runner.join()
示例#18
0
async def test_phase1(galois_field):
    """Phase 1 of power mixing writes a well-formed .input file per party.

    Each party's file must contain, in order: the field modulus, a random
    share, the masked difference (a - b), the power count k, and shares of
    b, b^2, ..., b^k.
    """
    field = galois_field
    n, t, k = 5, 2, 1
    pp_elements = PreProcessedElements()
    pp_elements.generate_powers(k, n, t, 1)
    pp_elements.generate_rands(k, n, t)

    async def verify_phase1(ctx, **kwargs):
        k_ = kwargs["k"]
        # b is the base of preprocessed power sequence 0.
        b_ = await ctx.preproc.get_powers(ctx, 0)[0].open()
        file_prefixes = [uuid4().hex]
        await pm.all_secrets_phase1(ctx, k=k, file_prefixes=file_prefixes)
        file_name = f"{file_prefixes[0]}-{ctx.myid}.input"
        file_path = f"{pp_elements.data_directory}{file_name}"
        with open(file_path, "r") as f:
            # Line 1: the field modulus.
            assert int(f.readline()) == field.modulus
            # next line is a random share, which should open successfully
            a_ = await ctx.Share(int(f.readline())).open()
            # Line 3: the publicly revealed difference a - b.
            assert int(f.readline()) == (a_ - b_).value
            # Line 4: the power count.
            assert int(f.readline()) == k_
            # Remaining k_ lines: shares of successive powers of b.
            for i in range(1, k_ + 1):
                assert (await
                        ctx.Share(int(f.readline())).open()).value == b_**(i)

    program_runner = TaskProgramRunner(n, t)
    program_runner.add(verify_phase1, k=k)
    await program_runner.join()
示例#19
0
async def test_open_future_shares():
    """Arithmetic on shares yields results that open to consistent values.

    Builds products and sums from shares (some already opened, some not)
    and checks every derived share opens to the expected field value.
    """
    n, t = 4, 1
    pp_elements = PreProcessedElements()
    pp_elements.generate_rands(1000, n, t)
    pp_elements.generate_triples(1000, n, t)

    async def _prog(context):
        e1_, e2_ = [context.preproc.get_rand(context) for _ in range(2)]
        # Open both random shares up front; e1/e2 are the plaintext values.
        e1, e2 = await asyncio.gather(*[e1_.open(), e2_.open()],
                                      return_exceptions=True)

        # Derived shares built before they are opened.
        s_prod_f = e1_ * e2_
        s_prod_f2 = s_prod_f * e1_
        final_prod = s_prod_f2 + e1_ + e2_
        final_prod_2 = final_prod * e1_
        # NOTE(review): Share() is handed the open() awaitable itself, not
        # its result — presumably Share accepts a pending future; confirm.
        wrapped_final_prod_2 = context.Share(final_prod_2.open())

        assert await s_prod_f2.open() == e1 * e1 * e2
        assert await final_prod.open() == e1 * e1 * e2 + e1 + e2
        assert await final_prod_2.open() == (e1 * e1 * e2 + e1 + e2) * e1
        assert await wrapped_final_prod_2.open() == await final_prod_2.open()

    program_runner = TaskProgramRunner(
        n, t, {MixinConstants.MultiplyShare: BeaverMultiply()})
    program_runner.add(_prog)
    await program_runner.join()
async def test_cant_multiply_shares_from_different_contexts():
    """Multiplying shares produced by two different MPC runs must raise.

    Two independent runners each return a Share; a third context then
    attempts to multiply them, which is expected to fail with TypeError.
    """
    from honeybadgermpc.mpc import TaskProgramRunner
    import asyncio

    n, t, k = 9, 2, 2000

    pp_elements = PreProcessedElements()
    pp_elements.generate_double_shares(k, n, t)
    pp_elements.generate_rands(k, n, t)

    async def _prog(context):
        share = context.Share(1)
        return share

    test_runner_1 = TaskProgramRunner(n, t)
    test_runner_2 = TaskProgramRunner(n, t)

    test_runner_1.add(_prog)
    test_runner_2.add(_prog)

    s1, s2 = await asyncio.gather(test_runner_1.join(), test_runner_2.join())

    async def _prog2(context):
        with raises(TypeError):
            await s1[0] * s2[0]

    # Bug fix: the original passed the *set* literal
    # {DoubleSharingMultiply.name, DoubleSharingMultiply()} where the
    # runner expects a mixin-config *dict* (cf. the dict form used at the
    # other TaskProgramRunner call sites in this file).
    test_runner_3 = TaskProgramRunner(
        n, t, {DoubleSharingMultiply.name: DoubleSharingMultiply()})
    test_runner_3.add(_prog2)
    await test_runner_3.join()
示例#21
0
def get_ipc_setup_commands(s3manager, instance_ids):
    """Create zero/triple preprocessing files, upload them to S3, and
    build the per-instance shell commands that download them."""
    from honeybadgermpc.preprocessing import PreProcessedElements
    from honeybadgermpc.preprocessing import PreProcessingConstants as Constants

    n, t = AwsConfig.TOTAL_VM_COUNT, AwsConfig.MPC_CONFIG.T
    num_triples = AwsConfig.MPC_CONFIG.NUM_TRIPLES

    pp_elements = PreProcessedElements()
    pp_elements.generate_zeros(num_triples, n, t)
    pp_elements.generate_triples(num_triples, n, t)

    def _upload(kind):
        # One preprocessing file per party for the given element kind.
        mixin = pp_elements.mixins[kind]
        return s3manager.upload_files(
            [mixin._build_file_name(n, t, i) for i in range(n)])

    triple_urls = _upload(Constants.TRIPLES)
    zero_urls = _upload(Constants.ZEROS)

    setup_commands = []
    for i, instance_id in enumerate(instance_ids):
        commands = [
            "sudo docker pull %s" % (AwsConfig.DOCKER_IMAGE_PATH),
            "mkdir -p sharedata",
            "cd sharedata; curl -sSO %s" % (triple_urls[i]),
            "cd sharedata; curl -sSO %s" % (zero_urls[i]),
            "mkdir -p benchmark-logs",
        ]
        setup_commands.append([instance_id, commands])

    return setup_commands
示例#22
0
File: fixtures.py  Project: tyurek/hbACSS
    def _benchmark_runner(prog, n=4, t=1, to_generate=None, k=1000, mixins=None):
        """Benchmark *prog* after generating the requested preprocessing.

        Fix: the original used mutable default arguments (``to_generate=[]``,
        ``mixins=[]``), which are shared across calls; they are now ``None``
        sentinels bound to fresh lists per call.

        Args:
            prog: the MPC program coroutine to benchmark.
            n: number of parties.
            t: fault-tolerance threshold.
            to_generate: preprocessing kinds to create (defaults to none).
            k: number of elements to generate per kind.
            mixins: mixin operators used to build the runner config.
        """
        to_generate = [] if to_generate is None else to_generate
        mixins = [] if mixins is None else mixins

        pp_elements = PreProcessedElements()
        _preprocess(pp_elements, n, t, k, to_generate)

        config = _build_config(mixins)
        program_runner = TaskProgramRunner(n, t, config)
        program_runner.add(prog)
        loop = asyncio.get_event_loop()

        def _work():
            # benchmark() needs a synchronous callable; drive the loop here.
            loop.run_until_complete(program_runner.join())

        benchmark(_work)
示例#23
0
async def tutorial_1():
    """Run the first tutorial program on a 4-node in-process test network."""
    # No sockets involved: the four nodes run as asyncio tasks.
    n, t = 4, 1
    pp = FakePreProcessedElements()
    for make_elements in (pp.generate_zeros, pp.generate_triples,
                          pp.generate_bits):
        make_elements(100, n, t)
    runner = TaskProgramRunner(n, t, config)
    runner.add(prog)
    return await runner.join()
示例#24
0
def get_butterfly_network_setup_commands(max_k, s3manager, instance_ids):
    """Generate butterfly-network preprocessing, upload it to S3, and
    build the per-instance download commands."""
    from honeybadgermpc.preprocessing import PreProcessedElements
    from honeybadgermpc.preprocessing import PreProcessingConstants as Constants

    n, t = AwsConfig.TOTAL_VM_COUNT, AwsConfig.MPC_CONFIG.T
    k = max_k if max_k else AwsConfig.MPC_CONFIG.K

    logging.info("Starting to create preprocessing files.")
    stime = time()
    num_switches = k * int(log(k, 2))**2
    pp_elements = PreProcessedElements()
    pp_elements.generate_triples(2 * num_switches, n, t)
    pp_elements.generate_one_minus_ones(num_switches, n, t)
    pp_elements.generate_rands(k, n, t)
    logging.info(f"Preprocessing files created in {time()-stime}")

    logging.info("Uploading inputs to AWS S3.")
    stime = time()

    def _upload(kind):
        # One preprocessing file per party for the given element kind.
        mixin = pp_elements.mixins[kind]
        return s3manager.upload_files(
            [mixin._build_file_name(n, t, i) for i in range(n)])

    triple_urls = _upload(Constants.TRIPLES)
    input_urls = _upload(Constants.RANDS)
    rand_share_urls = _upload(Constants.ONE_MINUS_ONE)
    logging.info(f"Inputs successfully uploaded in {time()-stime} seconds.")

    setup_commands = []
    for i, instance_id in enumerate(instance_ids):
        setup_commands.append([
            instance_id,
            [
                "sudo docker pull %s" % (AwsConfig.DOCKER_IMAGE_PATH),
                "mkdir -p sharedata",
                "cd sharedata; curl -sSO %s" % (triple_urls[i]),
                "cd sharedata; curl -sSO %s" % (rand_share_urls[i]),
                "cd sharedata; curl -sSO %s" % (input_urls[i]),
                "mkdir -p benchmark-logs",
            ],
        ])

    return setup_commands
示例#25
0
def get_powermixing_setup_commands(max_k, runid, s3manager, instance_ids):
    """Generate power-mixing preprocessing, upload per-instance link files
    to S3, and return the setup command list for every AWS instance.

    Args:
        max_k: override for the number of mixing inputs k (falls back to
            AwsConfig.MPC_CONFIG.K when falsy).
        runid: identifier used to name the per-instance link files.
        s3manager: uploader exposing upload_file/upload_files.
        instance_ids: AWS instance ids.

    Returns:
        list of [instance_id, [shell commands]] pairs.
    """
    from honeybadgermpc.preprocessing import PreProcessedElements
    from honeybadgermpc.preprocessing import PreProcessingConstants as Constants

    n, t = AwsConfig.TOTAL_VM_COUNT, AwsConfig.MPC_CONFIG.T
    k = max_k if max_k else AwsConfig.MPC_CONFIG.K

    logging.info("Starting to create preprocessing files.")
    stime = time()
    pp_elements = PreProcessedElements()
    pp_elements.generate_powers(k, n, t, k)
    pp_elements.generate_rands(k, n, t)
    logging.info(f"Preprocessing files created in {time()-stime}")

    setup_commands = []
    total_time = 0
    logging.info(f"Uploading input files to AWS S3.")

    for i, instance_id in enumerate(instance_ids):
        # NOTE(review): the same helper script is re-uploaded once per
        # instance; hoisting this out of the loop looks safe — confirm.
        url = s3manager.upload_file(f"aws/download_input.sh")
        commands = [
            "sudo docker pull %s" % (AwsConfig.DOCKER_IMAGE_PATH),
            f"curl -sSO {url}",
            "mkdir -p sharedata",
            "cp download_input.sh sharedata/download_input.sh ",
            "mkdir -p benchmark-logs",
            "ulimit -n 10000",
        ]
        file_names = []
        for j in range(k):
            # One powers file per power-sequence index j.
            prefix1 = f"{pp_elements.mixins[Constants.POWERS].file_prefix}_{j}"
            file_names.append(
                pp_elements.mixins[Constants.POWERS].build_filename(
                    n, t, i, prefix=prefix1))

            # NOTE(review): this filename does not depend on j, so the
            # RANDS file is appended (and uploaded) k times — verify the
            # duplication is intentional.
            file_names.append(
                pp_elements.mixins[Constants.RANDS].build_filename(n, t, i))

        stime = time()
        urls = s3manager.upload_files(file_names)
        total_time += time() - stime
        # Write one links file per instance listing every S3 URL to fetch.
        with open("%s-%d-links" % (runid, i), "w") as f:
            for url in urls:
                print(url, file=f)
        fname = f"{runid}-{i}-links"
        url = s3manager.upload_file(fname)
        commands.append(
            f"cd sharedata; curl -sSO {url}; bash download_input.sh {fname}")
        setup_commands.append([instance_id, commands])

    logging.info(f"Upload completed in {total_time} seconds.")

    return setup_commands
async def test_degree_reduction_share_array(test_runner):
    """Degree reduction of an array of 2t-shares preserves opened values."""
    n, t = 7, 2
    preproc = PreProcessedElements()
    preproc.generate_rands(1000, n, 2 * t)
    preproc.generate_double_shares(1000, n, t)

    async def _prog(context):
        high_shares = [context.preproc.get_rand(context, 2 * t)
                       for _ in range(10)]
        arr_2t = context.ShareArray(high_shares, 2 * t)
        reduced = await DoubleSharingMultiplyArrays.reduce_degree_share_array(
            context, arr_2t)
        actual = await reduced.open()

        expected = await arr_2t.open()
        for got, want in zip(actual, expected):
            assert got == want

    await run_test_program(_prog, test_runner, n, t)
示例#27
0
async def test_mpc_programs(peers, n, t, my_id):
    """Run the bundled sanity MPC programs over a process-based network.

    Party 0 generates the preprocessing (unless skipped via config); the
    other parties wait for it, then everyone executes the test programs.
    """
    from honeybadgermpc.mpc import test_prog1, test_prog2, test_batchopening
    from honeybadgermpc.preprocessing import PreProcessedElements
    from honeybadgermpc.preprocessing import wait_for_preprocessing, preprocessing_done

    if not HbmpcConfig.skip_preprocessing:
        # Only one party needs to generate the preprocessed elements for testing
        if HbmpcConfig.my_id == 0:
            pp_elements = PreProcessedElements()
            pp_elements.generate_zeros(1000, HbmpcConfig.N, HbmpcConfig.t)
            pp_elements.generate_triples(1000, HbmpcConfig.N, HbmpcConfig.t)
            preprocessing_done()
        else:
            await wait_for_preprocessing()

    async with ProcessProgramRunner(peers, n, t, my_id) as runner:
        # test_prog1 is deliberately not executed here; the bare reference
        # keeps the import used while its run stays disabled (see comment).
        test_prog1  # r1 = runner.execute("0", test_prog1)
        r2 = runner.execute("1", test_prog2)
        r3 = runner.execute("2", test_batchopening)
        results = await asyncio.gather(*[r2, r3])
        return results
示例#28
0
async def test_asynchronous_mixing():
    """End-to-end power mixing: mixed outputs match the original inputs."""
    import asyncio
    import apps.asynchromix.powermixing as pm
    from honeybadgermpc.mpc import TaskProgramRunner

    n, t, k = 3, 1, 4
    preproc = PreProcessedElements()
    preproc.generate_powers(k, n, t, k)
    preproc.generate_rands(1000, n, t)

    async def verify_output(context, **kwargs):
        result, input_shares = kwargs["result"], kwargs["input_shares"]
        my_shares = input_shares[context.myid]
        assert len(result) == len(my_shares)

        opened = await asyncio.gather(
            *[context.Share(sh.v, t).open() for sh in my_shares])
        # The mix permutes the values, so compare as sorted multisets.
        assert sorted(x.value for x in opened) == sorted(result)

    result, input_shares = await pm.async_mixing(n, t, k)
    runner = TaskProgramRunner(n, t)
    runner.add(verify_output, result=result, input_shares=input_shares)
    await runner.join()
示例#29
0
        result = solve([s.value for s in power_sums])
        bench_logger.info(f"[SolverPhase] Run Newton Solver: {time() - stime}")
        logging.info("Equation solver completed.")
        logging.debug(result)
        return result


if __name__ == "__main__":
    from honeybadgermpc.config import HbmpcConfig

    HbmpcConfig.load_config()

    run_id = HbmpcConfig.extras["run_id"]
    k = int(HbmpcConfig.extras["k"])

    pp_elements = PreProcessedElements()
    pp_elements.clear_preprocessing()

    asyncio.set_event_loop(asyncio.new_event_loop())
    loop = asyncio.get_event_loop()

    try:
        if not HbmpcConfig.skip_preprocessing:
            # Need to keep these fixed when running on processes.
            field = GF(Subgroup.BLS12_381)
            a_s = [field(i) for i in range(1000 + k, 1000, -1)]

            if HbmpcConfig.my_id == 0:
                pp_elements.generate_rands(k, HbmpcConfig.N, HbmpcConfig.t)
                pp_elements.generate_powers(k, HbmpcConfig.N, HbmpcConfig.t, k)
                pp_elements.preprocessing_done()
示例#30
0
    async def _mixing_loop(self):
        """Task 3: participate in every mixing epoch, forever.

        Each iteration waits for the contract to initiate a new epoch,
        unmasks the queued client inputs, runs the permutation-network MPC
        over them, and proposes the shuffled plaintexts to the contract.
        """
        # Task 3. Participating in mixing epochs
        contract_concise = ConciseContract(self.contract)
        pp_elements = PreProcessedElements()
        # Mixing parameters are fixed on-chain by the coordinator contract.
        n = contract_concise.n()
        t = contract_concise.t()
        K = contract_concise.K()  # noqa: N806
        PER_MIX_TRIPLES = contract_concise.PER_MIX_TRIPLES()  # noqa: N806
        PER_MIX_BITS = contract_concise.PER_MIX_BITS()  # noqa: N806

        epoch = 0
        while True:
            # 3.a. Wait for the next mix to be initiated
            while True:
                epochs_initiated = contract_concise.epochs_initiated()
                if epochs_initiated > epoch:
                    break
                await asyncio.sleep(5)

            # 3.b. Collect the inputs
            inputs = []
            for idx in range(epoch * K, (epoch + 1) * K):
                # Get the public input
                masked_input, inputmask_idx = contract_concise.input_queue(idx)
                # NOTE(review): `field` comes from the enclosing scope —
                # presumably the module-level finite field; confirm.
                masked_input = field(int.from_bytes(masked_input, "big"))
                # Get the input masks
                inputmask = self._inputmasks[inputmask_idx]

                # Subtracting the mask yields a share of the client input.
                m_share = masked_input - inputmask
                inputs.append(m_share)

            # 3.c. Collect the preprocessing
            # Slice out this epoch's window of triples and bits.
            triples = self._triples[
                (epoch + 0) * PER_MIX_TRIPLES : (epoch + 1) * PER_MIX_TRIPLES
            ]
            bits = self._bits[(epoch + 0) * PER_MIX_BITS : (epoch + 1) * PER_MIX_BITS]

            # Hack explanation... the relevant mixins are in triples
            # Evict stale cache/count entries so the files written below
            # are re-read for this epoch.
            key = (self.myid, n, t)
            for mixin in (pp_elements._triples, pp_elements._one_minus_ones):
                if key in mixin.cache:
                    del mixin.cache[key]
                    del mixin.count[key]

            # 3.d. Call the MPC program
            async def prog(ctx):
                pp_elements._init_data_dir()

                # Overwrite triples and one_minus_ones
                for kind, elems in zip(("triples", "one_minus_one"), (triples, bits)):
                    if kind == "triples":
                        # Triples are tuples; the file format stores a flat list.
                        elems = flatten_lists(elems)
                    elems = [e.value for e in elems]

                    mixin = pp_elements.mixins[kind]
                    mixin_filename = mixin.build_filename(ctx.N, ctx.t, ctx.myid)
                    mixin._write_preprocessing_file(
                        mixin_filename, ctx.t, ctx.myid, elems, append=False
                    )

                pp_elements._init_mixins()

                logging.info(f"[{ctx.myid}] Running permutation network")
                inps = list(map(ctx.Share, inputs))
                assert len(inps) == K

                shuffled = await iterated_butterfly_network(ctx, inps, K)
                shuffled_shares = ctx.ShareArray(list(map(ctx.Share, shuffled)))

                opened_values = await shuffled_shares.open()
                # Decode each opened 32-byte big-endian value into a string,
                # trimming NUL padding.
                msgs = [
                    m.value.to_bytes(32, "big").decode().strip("\x00")
                    for m in opened_values
                ]

                return msgs

            send, recv = self.get_send_recv(f"mpc:{epoch}")
            logging.info(f"[{self.myid}] MPC initiated:{epoch}")

            # Config just has to specify mixins used by switching_network
            config = {MixinConstants.MultiplyShareArray: BeaverMultiplyArrays()}

            ctx = Mpc(f"mpc:{epoch}", n, t, self.myid, send, recv, prog, config)
            result = await ctx._run()
            logging.info(f"[{self.myid}] MPC complete {result}")

            # 3.e. Output the published messages to contract
            result = ",".join(result)
            tx_hash = self.contract.functions.propose_output(epoch, result).transact(
                {"from": self.w3.eth.accounts[self.myid]}
            )
            tx_receipt = await wait_for_receipt(self.w3, tx_hash)
            rich_logs = self.contract.events.MixOutput().processReceipt(tx_receipt)
            if rich_logs:
                # The contract confirmed an output; log it and resync our
                # epoch counter to the on-chain value.
                epoch = rich_logs[0]["args"]["epoch"]
                output = rich_logs[0]["args"]["output"]
                logging.info(f"[{self.myid}] MIX OUTPUT[{epoch}] {output}")
            else:
                pass

            epoch += 1

        pass