def test_generate_and_transfer_primitive(
    get_clients: Callable,
    nr_parties: int,
    nr_instances: int,
    nr_instances_retrieve: int,
) -> None:
    parties = get_clients(nr_parties)
    session = Session(parties=parties)
    SessionManager.setup_mpc(session)

    g_kwargs = {"nr_parties": nr_parties, "nr_instances": nr_instances}
    CryptoPrimitiveProvider.generate_primitives(
        "test",
        session=session,
        g_kwargs=g_kwargs,
        p_kwargs={},
    )

    for i in range(nr_parties):
        remote_crypto_store = session.session_ptrs[i].crypto_store
        primitives = remote_crypto_store.get_primitives_from_store(
            op_str="test", nr_instances=nr_instances_retrieve
        ).get()

        assert primitives == [
            tuple(ShareTensor(i) for _ in range(PRIMITIVE_NR_ELEMS))
            for _ in range(nr_instances_retrieve)
        ]
def test_rst_invalid_triple(get_clients) -> None:
    parties = get_clients(3)
    falcon = Falcon("malicious")
    session = Session(parties, protocol=falcon)
    SessionManager.setup_mpc(session)

    shape_x = (1,)
    shape_y = (1,)

    # Create an inconsistent sharing by invoking a PRRS first.
    session.session_ptrs[0].prrs_generate_random_share(shape_x)

    with pytest.raises(ValueError):
        CryptoPrimitiveProvider.generate_primitives(
            "beaver_mul",
            session=session,
            g_kwargs={
                "session": session,
                "a_shape": shape_x,
                "b_shape": shape_y,
                "nr_parties": session.nr_parties,
            },
            p_kwargs={"a_shape": shape_x, "b_shape": shape_y},
        )
def test_transfer_primitives_mismatch_len_exception() -> None:
    """Primitives and sessions should have the same length."""
    with pytest.raises(ValueError):
        CryptoPrimitiveProvider._transfer_primitives_to_parties(
            op_str="test", primitives=[1], sessions=[], p_kwargs={}
        )
def test_transfer_primitives_type_exception() -> None:
    """Primitives should be a list."""
    with pytest.raises(ValueError):
        CryptoPrimitiveProvider._transfer_primitives_to_parties(
            op_str="test", primitives=50, sessions=[], p_kwargs={}
        )
def test_generate_primitive_from_dict_beaver_mul(get_clients) -> None:
    clients = get_clients(2)
    session = Session(parties=clients)
    SessionManager.setup_mpc(session)

    primitive_log = {
        "beaver_mul": [
            (
                {"a_shape": (2, 10), "b_shape": (2, 10)},
                {
                    "session": session,
                    "a_shape": (2, 10),
                    "b_shape": (2, 10),
                    "nr_parties": 2,
                },
            ),
            (
                {"a_shape": (1, 5), "b_shape": (1, 5)},
                {
                    "session": session,
                    "a_shape": (1, 5),
                    "b_shape": (1, 5),
                    "nr_parties": 2,
                },
            ),
        ]
    }

    CryptoPrimitiveProvider.generate_primitive_from_dict(
        primitive_log=primitive_log, session=session
    )

    args = primitive_log.popitem()[1]

    for arg in args:
        a_shape = arg[1].get("a_shape")
        b_shape = arg[1].get("b_shape")
        key = f"beaver_mul_{a_shape}_{b_shape}"

        store_client_1 = session.session_ptrs[0].crypto_store.store.get()
        store_client_2 = session.session_ptrs[1].crypto_store.store.get()

        a_shape_client_1 = tuple(store_client_1.get(key)[0][0].shape)
        b_shape_client_1 = tuple(store_client_1.get(key)[0][1].shape)

        assert a_shape == a_shape_client_1
        assert b_shape == b_shape_client_1

        a_shape_client_2 = tuple(store_client_2.get(key)[0][0].shape)
        b_shape_client_2 = tuple(store_client_2.get(key)[0][1].shape)

        assert a_shape == a_shape_client_2
        assert b_shape == b_shape_client_2
def test_primitive_logging_model(get_clients) -> None:
    model = LinearNet(torch)
    clients = get_clients(2)
    session = Session(parties=clients)
    SessionManager.setup_mpc(session)

    mpc_model = model.share(session=session)

    x_secret = torch.randn(2, 3)
    x_mpc = MPCTensor(secret=x_secret, session=session)

    model.eval()

    expected_primitive_log = {
        "beaver_matmul": [
            (
                {"a_shape": (2, 3), "b_shape": (3, 10)},
                {
                    "session": session,
                    "a_shape": (2, 3),
                    "b_shape": (3, 10),
                    "nr_parties": 2,
                },
            )
        ],
        "fss_comp": [({}, {"n_values": 20})],
        "beaver_mul": [
            (
                {"a_shape": (2, 10), "b_shape": (2, 10)},
                {
                    "session": session,
                    "a_shape": (2, 10),
                    "b_shape": (2, 10),
                    "nr_parties": 2,
                },
            )
        ],
    }

    CryptoPrimitiveProvider.start_logging()
    mpc_model(x_mpc)
    primitive_log = CryptoPrimitiveProvider.stop_logging()

    assert expected_primitive_log == primitive_log
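# A minimal sketch (not part of the test suite) of how the logging API exercised above
# can be combined with `generate_primitive_from_dict`: run the shared model once while
# logging to capture which primitives are needed, then replay the log so every party's
# crypto store is populated before the real inference. The arguments are assumed to be
# set up as in `test_primitive_logging_model` (a shared model, a configured session,
# and an MPCTensor input).
def warm_up_primitives(mpc_model, session, x_mpc) -> None:
    # Dry run with logging enabled to record the required primitives.
    CryptoPrimitiveProvider.start_logging()
    mpc_model(x_mpc)
    primitive_log = CryptoPrimitiveProvider.stop_logging()

    # Replay the log so each party holds its primitives ahead of time.
    CryptoPrimitiveProvider.generate_primitive_from_dict(
        primitive_log=primitive_log, session=session
    )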
def fss_op(x1: MPCTensor, x2: MPCTensor, op="eq") -> MPCTensor:
    """Define the workflow for a binary operation using Function Secret Sharing.

    Currently supported operands are = & <=, respectively corresponding to
    op = 'eq' and op = 'comp'.

    Args:
        x1 (MPCTensor): First private value.
        x2 (MPCTensor): Second private value.
        op: Type of operation to perform, should be 'eq' or 'comp'. Defaults to eq.

    Returns:
        MPCTensor: Shares of the comparison.
    """
    assert not th.cuda.is_available()  # nosec

    # FIXME: Better handle the case where x1 or x2 is not an MPCTensor. For the moment
    # FIXME: we cast it into an MPCTensor at the expense of extra communication.
    session = x1.session
    dtype = session.tensor_type

    shape = MPCTensor._get_shape("sub", x1.shape, x2.shape)
    n_values = shape.numel()

    CryptoPrimitiveProvider.generate_primitives(
        f"fss_{op}",
        sessions=session.session_ptrs,
        g_kwargs={"n_values": n_values},
        p_kwargs={},
    )

    args = zip(session.session_ptrs, x1.share_ptrs, x2.share_ptrs)
    args = [list(el) + [op] for el in args]

    shares = parallel_execution(mask_builder, session.parties)(args)

    # TODO: don't do .reconstruct(); this should be done remotely between the evaluators
    mask_value = MPCTensor(shares=shares, session=session)
    mask_value = mask_value.reconstruct(decode=False) % 2 ** n

    # TODO: add dtype to args
    args = [
        (session.session_ptrs[i], th.IntTensor([i]), mask_value, op) for i in range(2)
    ]

    shares = parallel_execution(evaluate, session.parties)(args)

    response = MPCTensor(session=session, shares=shares, shape=shape)
    response.shape = shape

    return response
def test_primitive_logging_beaver_matmul(get_clients) -> None:
    clients = get_clients(2)
    session = Session(parties=clients)
    SessionManager.setup_mpc(session)

    p_kwargs = {"a_shape": (2, 3), "b_shape": (3, 10)}
    g_kwargs = {"a_shape": (2, 3), "b_shape": (3, 10), "nr_parties": 2}

    CryptoPrimitiveProvider.start_logging()
    CryptoPrimitiveProvider.generate_primitives(
        session=session,
        op_str="beaver_matmul",
        p_kwargs=p_kwargs,
        g_kwargs=g_kwargs,
    )
    primitive_log = CryptoPrimitiveProvider.stop_logging()

    expected_log = {"beaver_matmul": [(p_kwargs, g_kwargs)]}

    assert expected_log == primitive_log
def test_generate_primitive_from_dict_beaver_conv2d(get_clients) -> None:
    clients = get_clients(2)
    session = Session(parties=clients)
    SessionManager.setup_mpc(session)

    primitive_log = {
        "beaver_conv2d": [
            (
                {"a_shape": (1, 1, 28, 28), "b_shape": (5, 1, 5, 5)},
                {
                    "session": session,
                    "a_shape": (1, 1, 28, 28),
                    "b_shape": (5, 1, 5, 5),
                    "nr_parties": 2,
                },
            )
        ]
    }

    CryptoPrimitiveProvider.generate_primitive_from_dict(
        primitive_log=primitive_log, session=session
    )

    a_shape = (1, 1, 28, 28)
    b_shape = (5, 1, 5, 5)
    key = f"beaver_conv2d_{a_shape}_{b_shape}"

    store_client_1 = session.session_ptrs[0].crypto_store.store.get()
    store_client_2 = session.session_ptrs[1].crypto_store.store.get()

    a_shape_client_1 = tuple(store_client_1.get(key)[0][0].shape)
    b_shape_client_1 = tuple(store_client_1.get(key)[0][1].shape)

    assert a_shape == a_shape_client_1
    assert b_shape == b_shape_client_1

    a_shape_client_2 = tuple(store_client_2.get(key)[0][0].shape)
    b_shape_client_2 = tuple(store_client_2.get(key)[0][1].shape)

    assert a_shape == a_shape_client_2
    assert b_shape == b_shape_client_2
def test_generate_primitive_from_dict_beaver_matmul(get_clients) -> None:
    clients = get_clients(2)
    session = Session(parties=clients)
    SessionManager.setup_mpc(session)

    primitive_log = {
        "beaver_matmul": [
            (
                {"a_shape": (2, 3), "b_shape": (3, 10)},
                {"a_shape": (2, 3), "b_shape": (3, 10), "nr_parties": 2},
            )
        ]
    }

    CryptoPrimitiveProvider.generate_primitive_from_dict(
        primitive_log=primitive_log, session=session
    )

    a_shape = (2, 3)
    b_shape = (3, 10)
    key = f"beaver_matmul_{a_shape}_{b_shape}"

    store_client_1 = session.session_ptrs[0].crypto_store.store.get()
    store_client_2 = session.session_ptrs[1].crypto_store.store.get()

    a_shape_client_1 = tuple(store_client_1.get(key)[0][0].shape)
    b_shape_client_1 = tuple(store_client_1.get(key)[0][1].shape)

    assert a_shape == a_shape_client_1
    assert b_shape == b_shape_client_1

    a_shape_client_2 = tuple(store_client_2.get(key)[0][0].shape)
    b_shape_client_2 = tuple(store_client_2.get(key)[0][1].shape)

    assert a_shape == a_shape_client_2
    assert b_shape == b_shape_client_2
def mul_master(x: MPCTensor, y: MPCTensor, op_str: str) -> MPCTensor:
    """Function that is executed by the orchestrator to multiply two secret values.

    :return: a new set of shares that represents the multiplication between two secret values
    :rtype: MPCTensor
    """
    if op_str not in EXPECTED_OPS:
        raise ValueError(f"{op_str} should be in {EXPECTED_OPS}")

    session = x.session

    shape_x = tuple(x.shape)
    shape_y = tuple(y.shape)

    primitives = CryptoPrimitiveProvider.generate_primitives(
        f"beaver_{op_str}",
        sessions=session.session_ptrs,
        g_kwargs={
            "a_shape": shape_x,
            "b_shape": shape_y,
            "nr_parties": session.nr_parties,
        },
        p_kwargs={"a_shape": shape_x, "b_shape": shape_y},
    )

    a_sh, b_sh, c_sh = primitives[0]

    a_mpc = MPCTensor(shares=a_sh, shape=x.shape, session=session)
    b_mpc = MPCTensor(shares=b_sh, shape=y.shape, session=session)

    eps = x - a_mpc
    delta = y - b_mpc

    eps_plaintext = eps.reconstruct(decode=False)
    delta_plaintext = delta.reconstruct(decode=False)

    # Arguments that must be sent to all parties
    common_args = [eps_plaintext, delta_plaintext, op_str]

    # Arguments specific to each party
    args = [[el] + common_args for el in session.session_ptrs]

    shares = parallel_execution(mul_parties, session.parties)(args)

    result = MPCTensor(shares=shares, shape=c_sh[0].shape, session=session)

    return result
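# Beaver's identity, which `mul_master` relies on, in plain (non-distributed) arithmetic:
# given a random triple (a, b, c) with c = a * b and the publicly revealed values
# eps = x - a and delta = y - b, the product can be recovered as
# x * y = c + eps * b + delta * a + eps * delta.
# A purely illustrative, self-contained check over a small ring (values chosen arbitrarily):
def _beaver_identity_demo(ring_size: int = 2 ** 16) -> None:
    x, y = 7, 5        # the "secret" values
    a, b = 11, 13      # random triple components
    c = (a * b) % ring_size

    eps = (x - a) % ring_size
    delta = (y - b) % ring_size

    xy = (c + eps * b + delta * a + eps * delta) % ring_size
    assert xy == (x * y) % ring_size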
def test_primitive_logging_beaver_conv2d(get_clients) -> None:
    clients = get_clients(2)
    session = Session(parties=clients)
    SessionManager.setup_mpc(session)

    p_kwargs = {"a_shape": (1, 1, 28, 28), "b_shape": (5, 1, 5, 5)}
    g_kwargs = {"a_shape": (1, 1, 28, 28), "b_shape": (5, 1, 5, 5), "nr_parties": 2}

    CryptoPrimitiveProvider.start_logging()
    CryptoPrimitiveProvider.generate_primitives(
        sessions=session.session_ptrs,
        op_str="beaver_conv2d",
        p_kwargs=p_kwargs,
        g_kwargs=g_kwargs,
    )
    primitive_log = CryptoPrimitiveProvider.stop_logging()

    expected_log = {"beaver_conv2d": [(p_kwargs, g_kwargs)]}

    assert expected_log == primitive_log
def public_divide(x: MPCTensor, y: Union[torch.Tensor, int]) -> MPCTensor:
    """Function that is executed by the orchestrator to divide a secret by a public value.

    Args:
        x (MPCTensor): Private numerator.
        y (Union[torch.Tensor, int]): Public denominator.

    Returns:
        MPCTensor: A new set of shares that represents the division.
    """
    session = x.session
    res_shape = x.shape

    if session.nr_parties == 2:
        shares = [operator.truediv(share, y) for share in x.share_ptrs]
        return MPCTensor(shares=shares, session=session, shape=res_shape)

    primitives = CryptoPrimitiveProvider.generate_primitives(
        "beaver_wraps",
        session=session,
        g_kwargs={"nr_parties": session.nr_parties, "shape": res_shape},
        p_kwargs=None,
    )

    r_sh, theta_r_sh = list(zip(*list(zip(*primitives))[0]))
    r_mpc = MPCTensor(shares=r_sh, session=session, shape=x.shape)

    z = r_mpc + x
    z_shares_local = z.get_shares()

    common_args = [z_shares_local, y]
    args = zip(r_mpc.share_ptrs, theta_r_sh, x.share_ptrs)
    args = [list(el) + common_args for el in args]

    theta_x = parallel_execution(div_wraps, session.parties)(args)
    theta_x_plaintext = MPCTensor(shares=theta_x, session=session).reconstruct()

    res = x - theta_x_plaintext * 4 * ((session.ring_size // 4) // y)

    return res
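# A minimal usage sketch for `public_divide` (illustrative only, not part of the library
# code above). It assumes a `get_clients` fixture and a session configured as in the
# surrounding tests; only the numerator stays secret-shared, the divisor is public.
def _public_divide_example(get_clients) -> None:
    clients = get_clients(3)
    session = Session(parties=clients)
    SessionManager.setup_mpc(session)

    x_secret = torch.Tensor([8.0, 12.0, -4.0])
    x = MPCTensor(secret=x_secret, session=session)

    result = public_divide(x, 4)  # divide the shared tensor by a public integer
    # result.reconstruct() is expected to approximate x_secret / 4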
def public_divide(x: MPCTensor, y: Union[torch.Tensor, int]) -> MPCTensor:
    """Function that is executed by the orchestrator to divide a secret by a public value.

    :return: a new set of shares that represents the division of the secret by the public value
    :rtype: MPCTensor
    """
    session = x.session
    res_shape = x.shape

    if session.nr_parties == 2:
        shares = [operator.truediv(share, y) for share in x.share_ptrs]
        return MPCTensor(shares=shares, session=session, shape=res_shape)

    primitives = CryptoPrimitiveProvider.generate_primitives(
        "beaver_wraps",
        sessions=session.session_ptrs,
        g_kwargs={"nr_parties": session.nr_parties, "shape": res_shape},
        p_kwargs=None,
    )

    r_sh, theta_r_sh = primitives[0]
    r_mpc = MPCTensor(shares=r_sh, session=session, shape=x.shape)

    z = r_mpc + x
    z_shares_local = z.get_shares()

    common_args = [z_shares_local, y]
    args = zip(session.session_ptrs, r_mpc.share_ptrs, theta_r_sh, x.share_ptrs)
    args = [list(el) + common_args for el in args]

    theta_x = parallel_execution(div_wraps, session.parties)(args)
    theta_x_plaintext = MPCTensor(shares=theta_x, session=session).reconstruct()

    res = x - theta_x_plaintext * 4 * ((session.ring_size // 4) // y)

    return res
def test_generate_primitive(
    get_clients: Callable, nr_parties: int, nr_instances: int
) -> None:
    parties = get_clients(nr_parties)
    session = Session(parties=parties)
    SessionManager.setup_mpc(session)

    g_kwargs = {"nr_parties": nr_parties, "nr_instances": nr_instances}
    res = CryptoPrimitiveProvider.generate_primitives(
        "test",
        sessions=session.session_ptrs,
        g_kwargs=g_kwargs,
        p_kwargs=None,
    )

    assert isinstance(res, list)
    assert len(res) == nr_parties

    for i, primitives in enumerate(res):
        for primitive in primitives:
            assert primitive == tuple(i for _ in range(PRIMITIVE_NR_ELEMS))
def test_exception_mul_malicious(get_clients):
    parties = get_clients(3)
    protocol = Falcon("malicious")
    session = Session(protocol=protocol, parties=parties)
    SessionManager.setup_mpc(session)

    x = MPCTensor(secret=1, session=session)
    y = MPCTensor(secret=2, session=session)

    shape_x = tuple(x.shape)
    shape_y = tuple(y.shape)

    p_kwargs = {"a_shape": shape_x, "b_shape": shape_y}
    tensor = torch.tensor([1])

    primitives = CryptoPrimitiveProvider.generate_primitives(
        "beaver_mul",
        session=session,
        g_kwargs={
            "session": session,
            "a_shape": shape_x,
            "b_shape": shape_y,
            "nr_parties": session.nr_parties,
        },
        p_kwargs=p_kwargs,
    )

    party = [0, 2]  # modify the primitives of parties 0 and 2
    sess_list = [session.session_ptrs[i].get_copy() for i in party]

    for i, p in enumerate(party):
        idx = 0 if p == 0 else 1
        primitives[p][0][0].shares[idx] = tensor
        sess_list[i].crypto_store.store = {}
        sess_list[i].crypto_store.populate_store("beaver_mul", primitives[p], **p_kwargs)
        session.session_ptrs[p] = sess_list[i].send(parties[p])

    with pytest.raises(ValueError):
        x * y
def test_exception_init() -> None:
    with pytest.raises(ValueError):
        CryptoPrimitiveProvider()
def test_generate_primitive_exception() -> None:
    with pytest.raises(ValueError):
        CryptoPrimitiveProvider.generate_primitives(op_str="SyMPC", session=Session())
def mul_malicious(
    x: MPCTensor,
    y: MPCTensor,
    session: Session,
    op_str: str,
    ring_size: int,
    config: Config,
    **kwargs_: Dict[Any, Any],
) -> MPCTensor:
    """Falcon malicious multiplication.

    Args:
        x (MPCTensor): Secret.
        y (MPCTensor): Another secret.
        session (Session): Session the tensors belong to.
        op_str (str): Operation string.
        ring_size (int): Ring size of the underlying tensor.
        config (Config): The configuration (base, precision) of the underlying tensor.
        kwargs_ (Dict[Any, Any]): Kwargs for some operations like conv2d.

    Returns:
        result (MPCTensor): Result of the operation.

    Raises:
        ValueError: If the shares are not valid.
    """
    shape_x = tuple(x.shape)
    shape_y = tuple(y.shape)

    result = Falcon.mul_semi_honest(
        x, y, session, op_str, ring_size, config, reshare=True, **kwargs_
    )

    args = [list(sh) + [op_str] for sh in zip(x.share_ptrs, y.share_ptrs)]
    try:
        mask = parallel_execution(Falcon.falcon_mask, session.parties)(args)
    except EmptyPrimitiveStore:
        CryptoPrimitiveProvider.generate_primitives(
            f"beaver_{op_str}",
            session=session,
            g_kwargs={
                "session": session,
                "a_shape": shape_x,
                "b_shape": shape_y,
                "nr_parties": session.nr_parties,
                "ring_size": ring_size,
                "config": config,
                **kwargs_,
            },
            p_kwargs={"a_shape": shape_x, "b_shape": shape_y},
        )
        mask = parallel_execution(Falcon.falcon_mask, session.parties)(args)

    # zip on pointers is compute intensive
    mask_local = [mask[idx].get() for idx in range(session.nr_parties)]
    eps_shares, delta_shares = zip(*mask_local)

    eps_plaintext = ReplicatedSharedTensor.reconstruct(eps_shares)
    delta_plaintext = ReplicatedSharedTensor.reconstruct(delta_shares)

    args = [
        list(sh) + [eps_plaintext, delta_plaintext, op_str]
        for sh in zip(result.share_ptrs)
    ]

    triple_shares = parallel_execution(Falcon.triple_verification, session.parties)(
        args, kwargs_
    )

    triple = MPCTensor(shares=triple_shares, session=x.session)

    if (triple.reconstruct(decode=False) == 0).all():
        return result
    else:
        raise ValueError("Computation Aborted: Malicious behavior.")
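# A short, illustrative sketch of how the malicious-secure path is reached from user
# code, following the setup used in `test_exception_mul_malicious`: with a 3-party
# session running Falcon in "malicious" mode, a plain `x * y` on MPCTensors dispatches
# to `mul_malicious`, and a tampered triple aborts with a ValueError. The `get_clients`
# fixture is assumed, as in the tests above.
def _falcon_malicious_mul_example(get_clients) -> None:
    parties = get_clients(3)
    session = Session(parties=parties, protocol=Falcon("malicious"))
    SessionManager.setup_mpc(session)

    x = MPCTensor(secret=3, session=session)
    y = MPCTensor(secret=4, session=session)

    z = x * y  # runs the triple verification implemented above
    # z.reconstruct() should yield 12 when no party misbehaves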
def mul_master(
    x: MPCTensor, y: MPCTensor, op_str: str, kwargs_: Dict[Any, Any]
) -> MPCTensor:
    """Function that is executed by the orchestrator to multiply two secret values.

    Args:
        x (MPCTensor): First value to multiply with.
        y (MPCTensor): Second value to multiply with.
        op_str (str): Operation string.
        kwargs_ (dict): Keyword arguments for some operations like conv2d.

    Raises:
        ValueError: If op_str is not in EXPECTED_OPS.

    Returns:
        MPCTensor: Result of the multiplication.
    """
    if op_str not in EXPECTED_OPS:
        raise ValueError(f"{op_str} should be in {EXPECTED_OPS}")

    session = x.session

    shape_x = tuple(x.shape)
    shape_y = tuple(y.shape)

    CryptoPrimitiveProvider.generate_primitives(
        f"beaver_{op_str}",
        sessions=session.session_ptrs,
        g_kwargs={
            "a_shape": shape_x,
            "b_shape": shape_y,
            "nr_parties": session.nr_parties,
            **kwargs_,
        },
        p_kwargs={"a_shape": shape_x, "b_shape": shape_y},
    )

    args = [
        list(el) + [op_str]
        for el in zip(session.session_ptrs, x.share_ptrs, y.share_ptrs)
    ]

    mask = parallel_execution(spdz_mask, session.parties)(args)

    eps_shares, delta_shares = zip(*mask)

    eps = MPCTensor(shares=eps_shares, session=session)
    delta = MPCTensor(shares=delta_shares, session=session)

    eps_plaintext = eps.reconstruct(decode=False)
    delta_plaintext = delta.reconstruct(decode=False)

    # Arguments that must be sent to all parties
    common_args = [eps_plaintext, delta_plaintext, op_str]

    # Arguments specific to each party
    args = [[el] + common_args for el in session.session_ptrs]

    shares = parallel_execution(mul_parties, session.parties)(args, kwargs_)

    result = MPCTensor(shares=shares, session=session)

    return result
def fss_op(x1: MPCTensor, x2: MPCTensor, op="eq") -> MPCTensor:
    """Define the workflow for a binary operation using Function Secret Sharing.

    Currently supported operands are = & <=, respectively corresponding to
    op = 'eq' and op = 'comp'.

    Args:
        x1 (MPCTensor): First private value.
        x2 (MPCTensor): Second private value.
        op: Type of operation to perform, should be 'eq' or 'comp'. Defaults to eq.

    Returns:
        MPCTensor: Shares of the comparison.
    """
    if th.cuda.is_available():
        # FSS is currently not supported on GPU.
        # https://stackoverflow.com/a/62145307/8878627
        # When the CUDA_VISIBLE_DEVICES environment variable is not set,
        # CUDA is not used even if available. Hence, we default to None.
        cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        warnings.warn("Temporarily disabling CUDA as FSS does not support it")
    else:
        cuda_visible_devices = None

    # FIXME: Better handle the case where x1 or x2 is not an MPCTensor. For the moment
    # FIXME: we cast it into an MPCTensor at the expense of extra communication.
    session = x1.session

    shape = MPCTensor._get_shape("sub", x1.shape, x2.shape)
    n_values = shape.numel()

    CryptoPrimitiveProvider.generate_primitives(
        f"fss_{op}",
        sessions=session.session_ptrs,
        g_kwargs={"n_values": n_values},
        p_kwargs={},
    )

    args = zip(session.session_ptrs, x1.share_ptrs, x2.share_ptrs)
    args = [list(el) + [op] for el in args]

    shares = parallel_execution(mask_builder, session.parties)(args)

    # TODO: don't do .reconstruct(); this should be done remotely between the evaluators
    mask_value = MPCTensor(shares=shares, session=session)
    mask_value = mask_value.reconstruct(decode=False) % 2 ** n

    # TODO: add dtype to args
    args = [
        (session.session_ptrs[i], th.IntTensor([i]), mask_value, op) for i in range(2)
    ]

    shares = parallel_execution(evaluate, session.parties)(args)

    response = MPCTensor(session=session, shares=shares, shape=shape)
    response.shape = shape

    if cuda_visible_devices is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices

    return response
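# A minimal, illustrative sketch of how `fss_op` can be invoked directly, mirroring the
# 2-party setup used in the tests above (a `get_clients` fixture is assumed). Per the
# docstring, op='eq' corresponds to equality and op='comp' to <=.
def _fss_comparison_example(get_clients) -> None:
    clients = get_clients(2)
    session = Session(parties=clients)
    SessionManager.setup_mpc(session)

    x = MPCTensor(secret=torch.tensor([1, 2, 3]), session=session)
    y = MPCTensor(secret=torch.tensor([1, 5, 2]), session=session)

    eq_shares = fss_op(x, y, op="eq")    # shares of (x == y)
    le_shares = fss_op(x, y, op="comp")  # shares of (x <= y)

    # Reconstructing should give 1 where the relation holds and 0 elsewhere,
    # e.g. eq_shares.reconstruct() -> tensor([1, 0, 0]).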
def test_register_primitive() -> None:
    val = CryptoPrimitiveProvider.get_state()
    expected_providers = "test"

    assert expected_providers in val, "Test Provider not registered"
def mul_master(
    x: MPCTensor, y: MPCTensor, op_str: str, kwargs_: Dict[Any, Any]
) -> MPCTensor:
    """Function that is executed by the orchestrator to multiply two secret values.

    Args:
        x (MPCTensor): First value to multiply with.
        y (MPCTensor): Second value to multiply with.
        op_str (str): Operation string.
        kwargs_ (dict): Keyword arguments for some operations like conv2d.

    Raises:
        ValueError: If op_str is not in EXPECTED_OPS.

    Returns:
        MPCTensor: Result of the multiplication.
    """
    if op_str not in EXPECTED_OPS:
        raise ValueError(f"{op_str} should be in {EXPECTED_OPS}")

    session = x.session

    shape_x = tuple(x.shape)
    shape_y = tuple(y.shape)

    args = [list(el) + [op_str] for el in zip(x.share_ptrs, y.share_ptrs)]

    try:
        mask = parallel_execution(spdz_mask, session.parties)(args)
    except EmptyPrimitiveStore:
        CryptoPrimitiveProvider.generate_primitives(
            f"beaver_{op_str}",
            session=session,
            g_kwargs={
                "session": session,
                "a_shape": shape_x,
                "b_shape": shape_y,
                "nr_parties": session.nr_parties,
                **kwargs_,
            },
            p_kwargs={"a_shape": shape_x, "b_shape": shape_y},
        )
        mask = parallel_execution(spdz_mask, session.parties)(args)

    # zip on pointers is compute intensive
    mask_local = [mask[idx].get() for idx in range(session.nr_parties)]
    eps_shares, delta_shares = zip(*mask_local)

    eps_plaintext = ShareTensor.reconstruct(eps_shares)
    delta_plaintext = ShareTensor.reconstruct(delta_shares)

    # Arguments specific to each party
    args = [
        [str(remote_session_uuid), eps_plaintext, delta_plaintext, op_str]
        for remote_session_uuid in session.rank_to_uuid.values()
    ]

    shares = parallel_execution(mul_parties, session.parties)(args, kwargs_)

    result = MPCTensor(shares=shares, session=session)

    return result