Example #1
    def compute_zvalue_and_add_mask(
        x: ReplicatedSharedTensor,
        y: ReplicatedSharedTensor,
        op_str: str,
        **kwargs: Dict[Any, Any],
    ) -> torch.Tensor:
        """Operation to compute local z share and add mask to it.

        Args:
            x (ReplicatedSharedTensor): Secret.
            y (ReplicatedSharedTensor): Another secret.
            op_str (str): Operation string.
            kwargs (Dict[Any, Any]): Keyword arguments for operations such as conv2d.

        Returns:
            share (torch.Tensor): The masked local z share.
        """
        # Parties calculate z value locally
        session = get_session(x.session_uuid)
        z_value = Falcon.multiplication_protocol(x, y, op_str, **kwargs)
        shape = MPCTensor._get_shape(op_str, x.shape, y.shape)
        przs_mask = session.przs_generate_random_share(
            shape=shape, ring_size=str(x.ring_size)
        )
        # Add the PRZS mask to the z value
        op = ReplicatedSharedTensor.get_op(x.ring_size, "add")
        share = op(z_value, przs_mask.get_shares()[0])

        return share
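The PRZS mask added above cancels out once all parties' shares are recombined, so it hides each party's local z value without changing the reconstructed result. A minimal sketch of that cancellation, in plain PyTorch rather than the sympc API:

import torch

torch.manual_seed(0)
shape = (2, 2)

# Each party i derives a random tensor from a PRG seed shared with the next
# party, so the differences r_i - r_{i+1} sum to zero across all three parties.
r = [torch.randint(0, 100, shape) for _ in range(3)]
przs_masks = [r[i] - r[(i + 1) % 3] for i in range(3)]
assert torch.equal(sum(przs_masks), torch.zeros(shape, dtype=torch.int64))

# Local z shares (stand-ins for the outputs of Falcon.multiplication_protocol).
z_values = [torch.randint(0, 10, shape) for _ in range(3)]
masked = [z + m for z, m in zip(z_values, przs_masks)]

# The masks cancel on reconstruction, leaving only the sum of the z values.
assert torch.equal(sum(masked), sum(z_values))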
Example #2
def fss_op(x1: MPCTensor, x2: MPCTensor, op="eq") -> MPCTensor:
    """Define the workflow for a binary operation using Function Secret Sharing.

    Currently supported operators are == and <=, corresponding to
    op = 'eq' and op = 'comp' respectively.

    Args:
        x1 (MPCTensor): First private value.
        x2 (MPCTensor): Second private value.
        op: Type of operation to perform; should be 'eq' or 'comp'. Defaults to 'eq'.

    Returns:
        MPCTensor: Shares of the comparison.
    """
    assert not th.cuda.is_available()  # nosec - FSS is not supported on GPU

    # FIXME: Better handle the case where x1 or x2 is not an MPCTensor. For the moment
    # FIXME: we cast it into an MPCTensor at the expense of extra communication.
    session = x1.session
    dtype = session.tensor_type

    shape = MPCTensor._get_shape("sub", x1.shape, x2.shape)
    n_values = shape.numel()

    CryptoPrimitiveProvider.generate_primitives(
        f"fss_{op}",
        sessions=session.session_ptrs,
        g_kwargs={"n_values": n_values},
        p_kwargs={},
    )

    args = zip(session.session_ptrs, x1.share_ptrs, x2.share_ptrs)
    args = [list(el) + [op] for el in args]

    shares = parallel_execution(mask_builder, session.parties)(args)

    # TODO: don't do .reconstruct(), this should be done remotely between the evaluators
    mask_value = MPCTensor(shares=shares, session=session)
    mask_value = mask_value.reconstruct(decode=False) % 2**n  # n: module-level constant (ring bit length)

    # TODO: add dtype to args
    args = [(session.session_ptrs[i], th.IntTensor([i]), mask_value, op)
            for i in range(2)]

    shares = parallel_execution(evaluate, session.parties)(args)

    response = MPCTensor(session=session, shares=shares, shape=shape)
    response.shape = shape
    return response
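Conceptually, the mask_builder step lets each party add its share of a correlated random value r to its share of x1 - x2, so reconstructing the masked difference publicly reveals only (x1 - x2 + r) mod 2**n; the evaluate step then runs the FSS keys on that public value. A minimal two-party sketch of the masking step, using plain additive shares instead of the MPCTensor API:

import torch

ring_size = 2**32
x1 = torch.tensor([7])
x2 = torch.tensor([7])

def make_shares(value):
    """Split a value into two additive shares modulo ring_size."""
    s0 = torch.randint(0, ring_size, value.shape)
    return s0, (value - s0) % ring_size

x1_0, x1_1 = make_shares(x1)
x2_0, x2_1 = make_shares(x2)

# mask_builder step: each party adds its share of a correlated random r
# to its share of the difference x1 - x2.
r = torch.randint(0, ring_size, x1.shape)
r_0, r_1 = make_shares(r)
masked_0 = (x1_0 - x2_0 + r_0) % ring_size
masked_1 = (x1_1 - x2_1 + r_1) % ring_size

# Reconstructing reveals only the masked difference (x1 - x2 + r) mod 2**32;
# the evaluate step would feed this public value into the FSS keys.
mask_value = (masked_0 + masked_1) % ring_size
assert torch.equal(mask_value, (x1 - x2 + r) % ring_size)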
Example #3
    def mul_semi_honest(
        x: MPCTensor,
        y: MPCTensor,
        session: Session,
        op_str: str,
        ring_size: int,
        config: Config,
        reshare: bool = False,
        **kwargs_: Dict[Any, Any],
    ) -> MPCTensor:
        """Falcon semihonest multiplication.

        Performs Falcon's multiplication protocol, adds masks, and performs resharing.

        Args:
            x (MPCTensor): Secret.
            y (MPCTensor): Another secret.
            session (Session): Session the tensors belong to.
            op_str (str): Operation string.
            ring_size (int): Ring size of the underlying tensors.
            config (Config): The configuration (base, precision) of the underlying tensor.
            reshare (bool): Convert 3-out-of-3 shares to 2-out-of-3 shares if set.
            kwargs_ (Dict[Any, Any]): Keyword arguments for operations such as conv2d.

        Returns:
            MPCTensor: Result of the operation.
        """
        args = [[x_share, y_share, op_str]
                for x_share, y_share in zip(x.share_ptrs, y.share_ptrs)]

        z_shares_ptrs = parallel_execution(Falcon.compute_zvalue_and_add_mask,
                                           session.parties)(args, kwargs_)

        result = MPCTensor(shares=z_shares_ptrs, session=x.session)

        if reshare:
            z_shares = [share.get() for share in z_shares_ptrs]

            # Convert 3-out-of-3 shares to 2-out-of-3 shares by resharing
            reshared_shares = ReplicatedSharedTensor.distribute_shares(
                z_shares, x.session, ring_size, config)
            result = MPCTensor(shares=reshared_shares, session=x.session)

        result.shape = MPCTensor._get_shape(op_str, x.shape, y.shape)  # for przs
        return result
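The reshare branch turns the 3-out-of-3 additive shares produced by compute_zvalue_and_add_mask back into a 2-out-of-3 replicated sharing, so that any two parties can again reconstruct. A minimal sketch of that share layout, in plain PyTorch rather than via ReplicatedSharedTensor.distribute_shares:

import torch

torch.manual_seed(0)
secret = torch.tensor([42])

# 3-out-of-3: each party holds a single additive share z_i, and all three
# shares are needed to reconstruct the secret.
z = [torch.randint(0, 100, (1,)) for _ in range(2)]
z.append(secret - z[0] - z[1])
assert torch.equal(z[0] + z[1] + z[2], secret)

# 2-out-of-3 replicated sharing: party i now holds the pair (z_i, z_{i+1}),
# so any two parties between them hold all three shares.
replicated = [(z[i], z[(i + 1) % 3]) for i in range(3)]
party0, party1 = replicated[0], replicated[1]
assert torch.equal(party0[0] + party0[1] + party1[1], secret)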
Example #4
def test_get_shape_none() -> None:
    with pytest.raises(ValueError):
        MPCTensor._get_shape("mul", None, None)
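For illustration only (this is not the actual MPCTensor._get_shape implementation), a shape helper of this kind can delegate broadcasting to torch.broadcast_shapes and must reject missing shapes, which is the ValueError the test above expects:

import torch

def get_broadcast_shape(shape_x, shape_y):
    """Return the broadcast result shape of two operands, rejecting missing shapes."""
    if shape_x is None or shape_y is None:
        raise ValueError("Both operand shapes must be provided")
    return torch.broadcast_shapes(shape_x, shape_y)

assert get_broadcast_shape((2, 1), (1, 3)) == (2, 3)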
Example #5
def fss_op(x1: MPCTensor, x2: MPCTensor, op="eq") -> MPCTensor:
    """Define the workflow for a binary operation using Function Secret Sharing.

    Currently supported operators are == and <=, corresponding to
    op = 'eq' and op = 'comp' respectively.

    Args:
        x1 (MPCTensor): First private value.
        x2 (MPCTensor): Second private value.
        op: Type of operation to perform; should be 'eq' or 'comp'. Defaults to 'eq'.

    Returns:
        MPCTensor: Shares of the comparison.
    """
    if th.cuda.is_available():
        # FSS is currently not supported on GPU.
        # https://stackoverflow.com/a/62145307/8878627

        # When the CUDA_VISIBLE_DEVICES environment variable is not set,
        # CUDA is not used even if available. Hence, we default to None
        cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        warnings.warn("Temporarily disabling CUDA as FSS does not support it")
    else:
        cuda_visible_devices = None

    # FIXME: Better handle the case where x1 or x2 is not an MPCTensor. For the moment
    # FIXME: we cast it into an MPCTensor at the expense of extra communication.
    session = x1.session

    shape = MPCTensor._get_shape("sub", x1.shape, x2.shape)
    n_values = shape.numel()

    CryptoPrimitiveProvider.generate_primitives(
        f"fss_{op}",
        sessions=session.session_ptrs,
        g_kwargs={"n_values": n_values},
        p_kwargs={},
    )

    args = zip(session.session_ptrs, x1.share_ptrs, x2.share_ptrs)
    args = [list(el) + [op] for el in args]

    shares = parallel_execution(mask_builder, session.parties)(args)

    # TODO: don't do .reconstruct(), this should be done remotely between the evaluators
    mask_value = MPCTensor(shares=shares, session=session)
    mask_value = mask_value.reconstruct(decode=False) % 2**n  # n: module-level constant (ring bit length)

    # TODO: add dtype to args
    args = [(session.session_ptrs[i], th.IntTensor([i]), mask_value, op)
            for i in range(2)]

    shares = parallel_execution(evaluate, session.parties)(args)

    response = MPCTensor(session=session, shares=shares, shape=shape)
    response.shape = shape

    if cuda_visible_devices is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices

    return response
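The save/disable/restore dance around CUDA_VISIBLE_DEVICES can also be factored into a context manager so the variable is restored even if the FSS evaluation raises; note that the inline version above leaves the variable set to the empty string when it was originally unset. A hedged sketch (this helper is not part of sympc):

import os
from contextlib import contextmanager

@contextmanager
def cuda_disabled():
    """Temporarily hide CUDA devices and restore the previous state on exit."""
    previous = os.environ.get("CUDA_VISIBLE_DEVICES", None)
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
    try:
        yield
    finally:
        if previous is None:
            # The variable was unset before, so remove it again.
            os.environ.pop("CUDA_VISIBLE_DEVICES", None)
        else:
            os.environ["CUDA_VISIBLE_DEVICES"] = previous

# Example usage:
#     with cuda_disabled():
#         response = fss_op(x1, x2, op="eq")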