Example #1
class TripleGenerator(PreProcessingBase):
    def __init__(self, n, t, my_id, send, recv, batch_size=10):
        super(TripleGenerator, self).__init__(
            n,
            t,
            my_id,
            send,
            recv,
            "triple",
            batch_size,
            avss_value_processor_chunk_size=3,
        )
        self.field = GF(Subgroup.BLS12_381)

    def _get_input_batch(self):
        inputs = []
        for _ in range(self.batch_size):
            a, b = self.field.random(), self.field.random()
            ab = a * b
            inputs += [a.value, b.value, ab.value]
        return inputs

    async def _extract(self):
        while True:
            async for batch in self._get_output_batch(3):
                triple_shares_int = await asyncio.gather(*batch)
                # Total number of triple-share values in this batch (3 per contributing node)
                n = len(triple_shares_int)
                assert n % 3 == 0

                for i in range(0, n, 3):
                    a, b, ab = triple_shares_int[i : i + 3]
                    self.output_queue.put_nowait((a, b, ab))
Example #2
def test_singleton_pattern():
    gf_1 = GF(19)
    gf_2 = GF(19)
    assert gf_1 is gf_2
    assert gf_1.modulus == 19
    assert gf_2.modulus == 19
    assert gf_1 is GF(19)
    assert GF(19).modulus == 19
Example #3
 def __init__(self, n, t, my_id, send, recv, batch_size=10):
     super(TripleGenerator, self).__init__(
         n,
         t,
         my_id,
         send,
         recv,
         "triple",
         batch_size,
         avss_value_processor_chunk_size=3,
     )
     self.field = GF(Subgroup.BLS12_381)
Example #4
def test_invalid_operations_on_fields():
    field1, field2 = GF(17), GF(7)
    operators = [
        operator.add,
        operator.sub,
        operator.mul,
        operator.truediv,
        operator.floordiv,
        operator.eq,
    ]
    for op in operators:
        with pytest.raises(FieldsNotIdentical):
            op(field1(2), field2(3))
Example #5
def test_benchmark_hbavss_dealer(test_router, benchmark, t, k):
    loop = asyncio.get_event_loop()
    n = 3 * t + 1
    field = GF(Subgroup.BLS12_381)
    g, h, pks, sks = get_avss_params(n + 1, t)
    crs = gen_pc_const_crs(t, g=g, h=h)
    pc = PolyCommitConst(crs, field=field)
    pc.preprocess_prover(8)
    pc.preprocess_verifier(8)
    values = [field.random() for _ in range(k)]
    params = (t, n, g, h, pks, sks, crs, pc, values, field)

    def _prog():
        loop.run_until_complete(hbavss_multibatch_dealer(test_router, params))

    benchmark(_prog)
Example #6
async def test_hbavss_batch_batch_gf(test_router):
    t = 2
    n = 3 * t + 1

    g, h, pks, sks = get_avss_params(n, t)
    sends, recvs, _ = test_router(n)
    crs = gen_pc_const_crs(t, g=g, h=h)
    field = GF(Subgroup.BLS12_381)
    values = [field.random() for _ in range(50)]
    avss_tasks = [None] * n
    dealer_id = randint(0, n - 1)

    shares = [None] * n
    with ExitStack() as stack:
        hbavss_list = [None] * n
        for i in range(n):
            hbavss = HbAvssBatch(pks,
                                 sks[i],
                                 crs,
                                 n,
                                 t,
                                 i,
                                 sends[i],
                                 recvs[i],
                                 field=field)
            hbavss_list[i] = hbavss
            stack.enter_context(hbavss)
            if i == dealer_id:
                avss_tasks[i] = asyncio.create_task(
                    hbavss.avss(0, values=values))
            else:
                avss_tasks[i] = asyncio.create_task(
                    hbavss.avss(0, dealer_id=dealer_id))
            avss_tasks[i].add_done_callback(print_exception_callback)
        outputs = await asyncio.gather(
            *[hbavss_list[i].output_queue.get() for i in range(n)])
        shares = [output[2] for output in outputs]
        for task in avss_tasks:
            task.cancel()

    flipped_shares = list(map(list, zip(*shares)))
    recovered_values = []
    for item in flipped_shares:
        recovered_values.append(
            polynomials_over(field).interpolate_at(zip(range(1, n + 1), item)))

    assert recovered_values == values
Example #7
async def _run(peers, n, t, k, my_id):
    field = GF(Subgroup.BLS12_381)
    async with ProcessProgramRunner(peers, n, t, my_id) as runner:
        send, recv = runner.get_send_recv("0")
        start_time = time.time()
        await randousha(n, t, k, my_id, send, recv, field)
        end_time = time.time()
        logging.info("[%d] Finished in %s", my_id, end_time - start_time)
Example #8
async def test_hbavss_light_gf(test_router):
    t = 2
    n = 3 * t + 1

    g, h, pks, sks = get_avss_params(n, t)
    sends, recvs, _ = test_router(n)
    crs = [g, h]
    field = GF(Subgroup.BLS12_381)
    value = field.random()
    avss_tasks = [None] * n
    hbavss_list = [None] * n
    dealer_id = randint(0, n - 1)

    with ExitStack() as stack:
        for i in range(n):
            hbavss = HbAvssLight(pks,
                                 sks[i],
                                 crs,
                                 n,
                                 t,
                                 i,
                                 sends[i],
                                 recvs[i],
                                 field=field)
            hbavss_list[i] = hbavss
            stack.enter_context(hbavss)
            if i == dealer_id:
                avss_tasks[i] = asyncio.create_task(hbavss.avss(0,
                                                                value=value))
            else:
                avss_tasks[i] = asyncio.create_task(
                    hbavss.avss(0, dealer_id=dealer_id))
            avss_tasks[i].add_done_callback(print_exception_callback)
        # shares = await asyncio.gather(*avss_tasks)
        outputs = await asyncio.gather(
            *[hbavss_list[i].output_queue.get() for i in range(n)])
        for task in avss_tasks:
            task.cancel()
    shares = []
    for item in outputs:
        shares.append(item[2])
    assert polynomials_over(field).interpolate_at(zip(range(1, n + 1),
                                                      shares)) == value
Example #9
async def test_mimc_jubjub_pkc(test_runner):

    field = GF(Subgroup.BLS12_381)
    plaintext = [field.random().value]
    seed = field.random().value

    async def _prog(context):
        # Key Generation
        priv_key, pub_key = await key_generation(context)

        # Encryption & Decryption
        cipher = mimc_encrypt(pub_key, plaintext, seed)
        decrypted_value = await mimc_decrypt(context, priv_key, cipher)
        decrypted = await asyncio.gather(*[d.open() for d in decrypted_value])

        assert decrypted == plaintext

    await test_runner(_prog, n, t, PREPROCESSING, k,
                      STANDARD_ARITHMETIC_MIXINS)
Example #10
def test_benchmark_gao_robust_decode(benchmark, t, galois_field):
    n = 3 * t + 1
    galois_field = GF(Subgroup.BLS12_381)
    point = EvalPoint(galois_field, n)
    dec = GaoRobustDecoder(t, point)
    parties = [_ for _ in range(n)]
    poly = polynomials_over(galois_field)
    truepoly = poly.random(degree=t)
    faults = []
    while len(faults) < t:
        r = randint(0, n - 1)
        if r not in faults:
            faults.append(r)
    shares_with_faults = []
    for i in parties:
        if i in faults:
            shares_with_faults.append(int(galois_field.random()))
        else:
            shares_with_faults.append(int(truepoly(i + 1)))
    benchmark(dec.robust_decode, parties, shares_with_faults)
Example #11
    async def _prog(context):
        x = context.preproc.get_zero(context)
        field = GF(Subgroup.BLS12_381)
        key = field(15)

        # Compute F_MiMC_mpc
        mm = await mimc_mpc(context, x, key)
        mm_open = await mm.open()

        # open x, then compute F_MiMC_plain
        x_open = await x.open()
        mp = mimc_plain(x_open, key)

        # Compare the MPC evaluation to the plain one
        assert mm_open == mp
Example #12
async def test_mimc_symmetric(test_runner):
    field = GF(Subgroup.BLS12_381)
    plaintext = [randint(0, field.modulus)]
    key_ = field(randint(0, field.modulus))

    async def _prog(context):
        key = context.preproc.get_zero(context) + key_

        cipher = mimc_encrypt(key_, plaintext)
        decrypted_value = await mimc_decrypt(context, key, cipher)

        decrypted_open = await context.ShareArray(decrypted_value).open()
        assert decrypted_open == plaintext

    await test_runner(_prog, n, t, PREPROCESSING, k, MIXINS)
Example #13
class RandomGenerator(PreProcessingBase):
    def __init__(self, n, t, my_id, send, recv, batch_size=10):
        super(RandomGenerator, self).__init__(n, t, my_id, send, recv, "rand",
                                              batch_size)
        self.field = GF(Subgroup.BLS12_381)

    def _get_input_batch(self):
        return [self.field.random().value for _ in range(self.batch_size)]

    async def _extract(self):
        while True:
            async for batch in self._get_output_batch():
                random_shares_int = await asyncio.gather(*batch)
                output_shares_int = refine_randoms(self.n, self.t, self.field,
                                                   random_shares_int)
                for value in output_shares_int:
                    self.output_queue.put_nowait(self.field(value))
Example #14
async def test_mimc_mpc_batch(test_runner):
    field = GF(Subgroup.BLS12_381)
    key = field(randint(0, field.modulus))

    async def _prog(context):
        xs = [context.preproc.get_rand(context) for _ in range(20)]

        # Compute F_MiMC_mpc, mm - mimc_mpc
        mm = await mimc_mpc_batch(context, xs, key)
        mm_open = await context.ShareArray(mm).open()

        # open x, then compute F_MiMC_plain, mp - mimc_plain
        xs_open = await context.ShareArray(xs).open()
        mp = [mimc_plain(x, key) for x in xs_open]

        # Compare the MPC evaluation to the plain one
        assert mm_open == mp

    await test_runner(_prog, n, t, PREPROCESSING, k, MIXINS)
Example #15
STANDARD_ARITHMETIC_MIXINS = [
    BeaverMultiply(),
    BeaverMultiplyArrays(),
    InvertShare(),
    InvertShareArray(),
    DivideShares(),
    DivideShareArrays(),
    Equality(),
    LessThan(),
]

PREPROCESSING = ["rands", "triples", "zeros", "cubes", "bits"]
n, t = 3, 1

FIELD = GF(Subgroup.BLS12_381)

p = FIELD.modulus
MAX = (p - 1) // 2
DIFF = 5
ranges = [0, p // 2**128, p // 2**64, p // 2**32, p // 2**16, MAX - DIFF]
range_pairs = [(x, y) for x, y in zip(ranges[:-1], ranges[1:])]


@mark.asyncio
@mark.parametrize("begin,end", range_pairs)
async def test_less_than(begin, end, test_runner):
    pp_elements = PreProcessedElements()
    pp_elements.generate_share_bits(50, n, t)
    a_values = [randint(begin, end) for _ in range(3)]
    b_values = [a_values[0] - DIFF, a_values[1], a_values[2] + DIFF]
Example #16
    HbmpcConfig.load_config()

    run_id = HbmpcConfig.extras["run_id"]
    k = int(HbmpcConfig.extras["k"])

    pp_elements = PreProcessedElements()
    pp_elements.clear_preprocessing()

    asyncio.set_event_loop(asyncio.new_event_loop())
    loop = asyncio.get_event_loop()

    try:
        if not HbmpcConfig.skip_preprocessing:
            # Need to keep these fixed when running on processes.
            field = GF(Subgroup.BLS12_381)
            a_s = [field(i) for i in range(1000 + k, 1000, -1)]

            if HbmpcConfig.my_id == 0:
                pp_elements.generate_rands(k, HbmpcConfig.N, HbmpcConfig.t)
                pp_elements.generate_powers(k, HbmpcConfig.N, HbmpcConfig.t, k)
                pp_elements.preprocessing_done()
            else:
                loop.run_until_complete(pp_elements.wait_for_preprocessing())

        loop.run_until_complete(
            async_mixing_in_processes(
                HbmpcConfig.peers,
                HbmpcConfig.N,
                HbmpcConfig.t,
                k,
Example #17
def galois_field():
    from honeybadgermpc.field import GF
    from honeybadgermpc.elliptic_curve import Subgroup

    return GF(Subgroup.BLS12_381)
Example #18
def test_multiple_fields():
    field1 = GF(17)
    field2 = GF(7)
    assert field1.modulus == 17
    assert field2.modulus == 7
Example #19
def test_bool():
    from honeybadgermpc.field import GF

    field1 = GF(17)
    assert bool(field1(23))
    assert not bool(field1(0))
Example #20
 def __init__(self, n, t, my_id, send, recv, batch_size=10):
     super(RandomGenerator, self).__init__(
         n, t, my_id, send, recv, "rand", batch_size
     )
     self.field = GF(Subgroup.BLS12_381)
Example #21
def make_wb_encoder_decoder(n, k, p, point=None):
    """
    n: number of symbols to encode
    k: number of symbols in the message
        (k=t+1) where t is the degree of the polynomial
    """
    if not k <= n <= p:
        raise Exception(
            "Must have k <= n <= p but instead had (n,k,p) == (%r, %r, %r)" % (n, k, p)
        )
    t = k - 1  # degree of polynomial
    fp = GF(p)
    poly = polynomials_over(fp)

    # The message points correspond to polynomial evaluations,
    # either at f(i) for convenience, or at f(omega^i). If omega is an
    # n'th root of unity, we can do efficient FFT-based polynomial
    # interpolations.
    if point is None or type(point) is not EvalPoint:
        point = EvalPoint(fp, n, use_omega_powers=False)

    # message is a list of integers less than p
    def encode(message):
        if not all(x < p for x in message):
            raise Exception(
                "Message is improperly encoded as integers < p. It was:\n%r" % message
            )
        assert len(message) == t + 1

        the_poly = poly(message)
        return [the_poly(point(i)) for i in range(n)]

    def solve_system(encoded_message, max_e, debug=False):
        """
        input: points in form (x,y)
        output: coefficients of interpolated polynomial

        due to Jeremy Kun
        https://jeremykun.com/2015/09/07/welch-berlekamp/
        """
        for e in range(max_e, 0, -1):
            e_num_vars = e + 1
            q_num_vars = e + k

            def row(i, a, b):
                return (
                    [b * a ** j for j in range(e_num_vars)]
                    + [-1 * a ** j for j in range(q_num_vars)]
                    + [0]
                )  # the "extended" part of the linear system

            system = [row(i, a, b) for (i, (a, b)) in enumerate(encoded_message)] + [
                [fp(0)] * (e_num_vars - 1) + [fp(1)] + [fp(0)] * (q_num_vars) + [fp(1)]
            ]  # ensure coefficient of x^e in E(x) is 1

            if debug:
                logging.debug("\ne is %r" % e)
                logging.debug("\nsystem is:\n\n")
                for row in system:
                    logging.debug("\t%r" % (row,))

            solution = some_solution(system, free_variable_value=1)
            e_ = poly([solution[j] for j in range(e + 1)])
            q_ = poly([solution[j] for j in range(e + 1, len(solution))])

            if debug:
                logging.debug("\nreduced system is:\n\n")
                for row in system:
                    logging.debug("\t%r" % (row,))

                logging.debug("solution is %r" % (solution,))
                logging.debug("Q is %r" % (q_,))
                logging.debug("E is %r" % (e_,))

            p_, remainder = q_.__divmod__(e_)
            if debug:
                logging.debug("P(x) = %r" % p_)
                logging.debug("r(x) = %r" % remainder)
            if remainder.is_zero():
                return q_, e_
        raise ValueError("found no divisors!")

    def decode(encoded_msg, debug=True):
        assert len(encoded_msg) == n
        c = sum(m is None for m in encoded_msg)  # number of erasures
        assert 2 * t + 1 + c <= n
        # e = ceil((n - c - t - 1) / 2) = ((n - c - t) // 2)
        e = (n - c - t) // 2
        if debug:
            logging.debug(f"n: {n} k: {k} t: {t} c: {c}")
            logging.debug(f"decoding with e: {e}")
            logging.debug(f"decoding with c: {c}")

        enc_m = [(point(i), m) for i, m in enumerate(encoded_msg) if m is not None]

        if e == 0:
            # decode with no errors
            p_ = poly.interpolate(enc_m)
            return p_.coeffs

        q_, e_ = solve_system(enc_m, max_e=e, debug=debug)
        p_, remainder = q_.__divmod__(e_)
        if not remainder.is_zero():
            raise Exception("Q is not divisibly by E!")
        return p_.coeffs

    return encode, decode, solve_system
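A hedged usage sketch of make_wb_encoder_decoder (illustrative parameters, not taken from the library's tests; assumes GF and the module-level helpers used inside the function are importable):

# Encode a 3-symbol message over GF(11), flip one codeword symbol, and decode.
n, k, p = 7, 3, 11                            # k = t + 1 with t = 2, and k <= n <= p
encode, decode, _ = make_wb_encoder_decoder(n, k, p)
message = [2, 3, 5]                           # k integers < p
codeword = encode(message)                    # n field elements f(1), ..., f(n)
codeword[0] = codeword[0] + GF(p)(1)          # introduce a single error
decoded = decode(codeword, debug=False)       # tolerates up to (n - k) // 2 errors
assert [c.value for c in decoded] == message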
Example #22
Secure Computation With Fixed-Point Numbers
Catrina and Saxena
http://www.ifca.ai/pub/fc10/31_47.pdf
"""

# Fixed Point parameters

F = 32  # The precision (binary bits)
"""
This implementation of the library is not completely hiding. This leaks information about the bits used in computation which is determinied by the security parameter Kappa.
In particular, we leak O(1/(2^Kappa)) information theorotic bits per operation on a floating point secret.
"""
KAPPA = 32  # Statistical security parameter
K = 64  # Total number of bits in the representation (including padding bits)
p = modulus = Subgroup.BLS12_381
Field = GF(p)
"""
Library for fixed point operations:
Throughout the code:
`x` stands for private floating point number
`k` is the total of number of bits we are representing

"""

# General (non MPC) fixed point functions
""" Change a function to fixed point form. In general when we are dealing with fixed points, it
is convert them back to fixed form.

2.5 with f = 32 goes to 2.5* 2**32 which is an int.
Note that this function always rounds down the error
"""