Example 1
    def share(
        self,
        *owners: List[BaseWorker],
        field: Union[int, None] = None,
        crypto_provider: Union[BaseWorker, None] = None,
        requires_grad: bool = False,
        no_wrap: bool = False,
    ):
        """This is a pass through method which calls .share on the child.

        Args:
            owners (list): A list of BaseWorker objects determining who to send shares to.
            field (int or None): The arithmetic field where the shares live.
            crypto_provider (BaseWorker or None): The worker providing the crypto primitives.
            requires_grad (bool): Should we add AutogradTensor to allow gradient computation,
                default is False.
            no_wrap (bool): If True, return the shared tensor without the usual torch wrapper.
        """
        if crypto_provider is None:
            warnings.warn(
                "'crypto_provider' should not be None. Please provide a dedicated worker, which will act "
                "as a trusted third party and will provide the crypto primitives needed for Multi Party "
                "Computation.")
        elif crypto_provider.is_client_worker:
            warnings.warn("client worker cannot be a crypto_provider.")

        if self.has_child():
            chain = self.child

            kwargs = ({
                "requires_grad": requires_grad
            } if isinstance(chain, syft.PointerTensor) else {})
            shared_tensor = chain.share(*owners,
                                        field=field,
                                        crypto_provider=crypto_provider,
                                        **kwargs)
        else:
            if self.type() == "torch.FloatTensor":
                raise TypeError(
                    "FloatTensor cannot be additively shared, Use fix_precision."
                )

            shared_tensor = (syft.AdditiveSharingTensor(
                field=field, crypto_provider=crypto_provider,
                owner=self.owner).on(self.copy(),
                                     wrap=False).init_shares(*owners))

        if requires_grad and not isinstance(shared_tensor, syft.PointerTensor):
            shared_tensor = syft.AutogradTensor().on(shared_tensor, wrap=False)

        if not no_wrap:
            shared_tensor = shared_tensor.wrap()

        return shared_tensor
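For context, a minimal usage sketch of this method, assuming the PySyft 0.2.x hooking API with VirtualWorker (the worker names are illustrative, not part of the code above):

import torch
import syft as sy

hook = sy.TorchHook(torch)
alice = sy.VirtualWorker(hook, id="alice")
bob = sy.VirtualWorker(hook, id="bob")
crypto_provider = sy.VirtualWorker(hook, id="crypto_provider")

x = torch.tensor([1, 2, 3])
# Integer tensors can be shared directly; floats must go through fix_precision() first.
x_sh = x.share(alice, bob, crypto_provider=crypto_provider)
print(x_sh)        # a torch wrapper around an AdditiveSharingTensor
print(x_sh.get())  # reconstructs tensor([1, 2, 3])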
Example 2
    def share(
        self,
        *owners: List[BaseWorker],
        field: Union[int, None] = None,
        dtype: Union[str, None] = None,
        crypto_provider: Union[BaseWorker, None] = None,
        requires_grad: bool = False,
        no_wrap: bool = False,
    ):
        """This is a pass through method which calls .share on the child.

        Args:
            owners (list): A list of BaseWorker objects determining who to send shares to.
            field (int or None): The arithmetic field where the shares live.
            dtype (str or None): The dtype of the shares.
            crypto_provider (BaseWorker or None): The worker providing the crypto primitives.
            requires_grad (bool): Should we add AutogradTensor to allow gradient computation,
                default is False.
            no_wrap (bool): If True, return the shared tensor without the usual torch wrapper.
        """
        if self.has_child():
            chain = self.child

            kwargs_ = ({
                "requires_grad": requires_grad
            } if isinstance(chain, syft.PointerTensor) else {})
            shared_tensor = chain.share(*owners,
                                        field=field,
                                        dtype=dtype,
                                        crypto_provider=crypto_provider,
                                        **kwargs_)
        else:
            if self.type() == "torch.FloatTensor":
                raise TypeError(
                    "FloatTensor cannot be additively shared, Use fix_precision."
                )

            shared_tensor = (syft.AdditiveSharingTensor(
                field=field,
                dtype=dtype,
                crypto_provider=crypto_provider,
                owner=self.owner).on(self.copy(),
                                     wrap=False).init_shares(*owners))

        if requires_grad and not isinstance(shared_tensor, syft.PointerTensor):
            shared_tensor = syft.AutogradTensor().on(shared_tensor, wrap=False)

        if not no_wrap:
            shared_tensor = shared_tensor.wrap()

        return shared_tensor
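The only difference from Example 1 is the extra dtype argument; a hedged sketch of how it might be passed (same assumed VirtualWorker setup as in the previous sketch):

y = torch.tensor([10, 20])
# dtype selects the share representation; "long" (int64) is the usual default,
# while "int" (int32) gives smaller shares at the cost of a smaller field.
y_sh = y.share(alice, bob, crypto_provider=crypto_provider, dtype="int")
print(y_sh.get())  # tensor([10, 20])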
Example 3
    def share(
        self,
        *owners: List[BaseWorker],
        field: Union[int, None] = None,
        crypto_provider: Union[BaseWorker, None] = None,
        requires_grad: bool = False,
        no_wrap: bool = False,
    ):
        """This is a pass through method which calls .share on the child.

        Args:
            owners (list): A list of BaseWorker objects determining who to send shares to.
            field (int or None): The arithmetic field where the shares live.
            crypto_provider (BaseWorker or None): The worker providing the crypto primitives.
            requires_grad (bool): Should we add AutogradTensor to allow gradient computation,
                default is False.
            no_wrap (bool): If True, return the shared tensor without the usual torch wrapper.
        """

        if self.has_child():
            chain = self.child.copy()
            chain.owner = self.child.owner

            kwargs = (
                {"requires_grad": requires_grad} if isinstance(chain, syft.PointerTensor) else {}
            )
            shared_tensor = chain.share(
                *owners, field=field, crypto_provider=crypto_provider, **kwargs
            )
        else:
            shared_tensor = (
                syft.AdditiveSharingTensor(
                    field=field, crypto_provider=crypto_provider, owner=self.owner
                )
                .on(self.copy())
                .child.init_shares(*owners)
            )

        if not no_wrap:
            shared_tensor = shared_tensor.wrap()

        if requires_grad and not (
            shared_tensor.is_wrapper and isinstance(shared_tensor.child, syft.PointerTensor)
        ):
            shared_tensor = syft.AutogradTensor().on(shared_tensor)

        return shared_tensor
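Since FloatTensors cannot be additively shared directly, the usual pipeline converts them with fix_precision() first; a hedged sketch (same assumed setup as in the earlier sketches):

f = torch.tensor([0.1, 0.2, 0.3])
f_sh = f.fix_precision().share(alice, bob, crypto_provider=crypto_provider, requires_grad=True)
# requires_grad=True wraps the result in an AutogradTensor so backward() can run under MPC.
print(f_sh.get().float_precision())  # approximately tensor([0.1000, 0.2000, 0.3000])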
Example 4
def fss_op(x1, x2, op="eq"):
    """
    Define the workflow for a binary operation using Function Secret Sharing

    Currently supported operand are = & <=, respectively corresponding to
    op = 'eq' and 'comp'

    Args:
        x1: first AST
        x2: second AST
        op: type of operation to perform, should be 'eq' or 'comp'

    Returns:
        shares of the comparison
    """
    if isinstance(x1, sy.AdditiveSharingTensor):
        locations = x1.locations
        class_attributes = x1.get_class_attributes()
    else:
        locations = x2.locations
        class_attributes = x2.get_class_attributes()

    dtype = class_attributes.get("dtype")
    asynchronous = isinstance(locations[0], WebsocketClientWorker)

    workers_args = [
        (
            x1.child[location.id]
            if isinstance(x1, sy.AdditiveSharingTensor)
            else (x1 if i == 0 else 0),
            x2.child[location.id]
            if isinstance(x2, sy.AdditiveSharingTensor)
            else (x2 if i == 0 else 0),
            op,
        )
        for i, location in enumerate(locations)
    ]

    try:
        shares = []
        for i, location in enumerate(locations):
            share = remote(mask_builder, location=location)(*workers_args[i], return_value=True)
            shares.append(share)
    except EmptyCryptoPrimitiveStoreError as e:
        if sy.local_worker.crypto_store.force_preprocessing:
            raise
        sy.local_worker.crypto_store.provide_primitives(workers=locations, **e.kwargs_)
        return fss_op(x1, x2, op)

    # async has a cost which is too expensive for this command
    # shares = asyncio.run(sy.local_worker.async_dispatch(
    #     workers=locations,
    #     commands=[
    #         (full_name(mask_builder), None, workers_args[i], {})
    #         for i in [0, 1]
    #     ],
    #     return_value=True
    # ))

    mask_value = sum(shares) % 2 ** n

    for location, share in zip(locations, shares):
        location.de_register_obj(share)
        del share

    workers_args = [(th.IntTensor([i]), mask_value, op, dtype) for i in range(2)]
    if not asynchronous:
        shares = []
        for i, location in enumerate(locations):
            share = remote(evaluate, location=location)(*workers_args[i], return_value=False)
            shares.append(share)
    else:
        print("async")
        shares = asyncio.run(
            sy.local_worker.async_dispatch(
                workers=locations,
                commands=[(full_name(evaluate), None, workers_args[i], {}) for i in [0, 1]],
            )
        )

    shares = {loc.id: share for loc, share in zip(locations, shares)}

    response = sy.AdditiveSharingTensor(shares, **class_attributes)
    return response
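For orientation, a hedged sketch of how this code path is usually reached from user code; the protocol="fss" keyword is an assumption about the PySyft 0.2.x release in use (setup as in the earlier sketches):

a = torch.tensor([3, 5]).share(alice, bob, crypto_provider=crypto_provider, protocol="fss")
b = torch.tensor([3, 7]).share(alice, bob, crypto_provider=crypto_provider, protocol="fss")
print((a == b).get())  # dispatches to fss_op(..., op="eq")   -> tensor([1, 0])
print((a <= b).get())  # dispatches to fss_op(..., op="comp") -> tensor([1, 1])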
Example 5
def build_triple(
    op: str,
    shape: Tuple[th.Size, th.Size],
    n_workers: int,
    n_instances: int,
    torch_dtype: th.dtype,
    field: int,
):
    """
    Generates and shares a multiplication triple (a, b, c)

    Args:
        op (str): 'mul' or 'matmul': the op ° which ensures a ° b = c
        shape (Tuple[th.Size, th.Size]): the shapes of a and b
        n_workers (int): number of workers
        n_instances (int): the number of tuples (works only for mul: there is a
            shape issue for matmul which could be addressed)
        torch_dtype (th.dtype): the type of the shares
        field (int): the field for the randomness

    Returns:
        a triple of shares (a_sh, b_sh, c_sh) per worker where a_sh is a share of a
    """
    left_shape, right_shape = shape
    cmd = getattr(th, op)
    low_bound, high_bound = -(field // 2), (field - 1) // 2
    if op == "matmul":
        cmd = my_matmul
    if op == "mul":
        cmd = my_mul
    a = th.randint(low_bound, high_bound, (n_instances, *left_shape), dtype=torch_dtype).to(device)
    b = th.randint(low_bound, high_bound, (n_instances, *right_shape), dtype=torch_dtype).to(device)
    # NOTE (fork-specific modification): reduce a and b with int48module; the original
    # comment here ("some where the field is wrong") suggests the field handling is suspect.
    a = int48module(a)
    b = int48module(b)

    if op == "mul" and b.numel() == a.numel():
        # examples:
        #   torch.tensor([3]) * torch.tensor(3) = tensor([9])
        #   torch.tensor([3]) * torch.tensor([[3]]) = tensor([[9]])
        if len(a.shape) == len(b.shape):
            c = cmd(a, b)
        elif len(a.shape) > len(b.shape):
            shape = b.shape
            b = b.reshape_as(a)
            c = cmd(a, b)
            b = b.reshape(*shape)
        else:  # len(a.shape) < len(b.shape):
            shape = a.shape
            a = a.reshape_as(b)
            c = cmd(a, b)
            a = a.reshape(*shape)
    else:
        c = cmd(a, b)

    helper = sy.AdditiveSharingTensor(field=field)
    # An alternative kept commented out in the original fork created a second helper with
    # field=field*field and used it to share c in that larger field; the loop below simply
    # shares a, b and c in the same field.

    shares_worker = [[0, 0, 0] for _ in range(n_workers)]
    for i, tensor in enumerate([a, b, c]):
        shares = helper.generate_shares(secret=tensor, n_workers=n_workers, random_type=torch_dtype)
        for w_id in range(n_workers):
            shares_worker[w_id][i] = shares[w_id]

    return shares_worker
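For intuition, the returned structure encodes a Beaver triple: random a and b with c = a ° b, split additively so each worker holds one share of each value. A standalone toy sketch with a small field (plain torch, not calling the modified function above):

import torch as th

field = 2 ** 16
low, high = -(field // 2), (field - 1) // 2
a = th.randint(low, high, (3,), dtype=th.int64)
b = th.randint(low, high, (3,), dtype=th.int64)
c = a * b                                     # the relation a ° b = c that the triple encodes
a0 = th.randint(low, high, (3,), dtype=th.int64)
b0 = th.randint(low, high, (3,), dtype=th.int64)
c0 = th.randint(low, high, (3,), dtype=th.int64)
shares_worker = [[a0, b0, c0], [a - a0, b - b0, c - c0]]  # one [a_sh, b_sh, c_sh] per worker
# Summing the shares of each component reconstructs the triple:
assert th.equal(shares_worker[0][2] + shares_worker[1][2], c)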
Example 6
def spdz_mul(cmd, x, y, crypto_provider, dtype, torch_dtype, field):
    """Abstractly multiplies two tensors (mul or matmul)
    Args:
        cmd: a callable of the equation to be computed (mul or matmul)
        x (AdditiveSharingTensor): the left part of the operation
        y (AdditiveSharingTensor): the right part of the operation
        crypto_provider (AbstractWorker): an AbstractWorker which is used
            to generate triples
        dtype (str): denotes the dtype of the shares, should be 'long' (default),
            'int' or 'custom'
        torch_dtype (torch.dtype): the real type of the shares, should be th.int64
            (default) or th.int32
        field (int): an integer denoting the size of the field, default is 2**64
    Return:
        an AdditiveSharingTensor
    """

    op = cmd
    locations = x.locations
    # Experimental results don't show real improvements with asynchronous = True
    asynchronous = False  # isinstance(locations[0], WebsocketClientWorker)

    try:
        shares_delta, shares_epsilon = [], []
        for location in locations:
            args = (x.child[location.id], y.child[location.id], op, dtype,
                    torch_dtype, field, location)
            share_delta, share_epsilon = remote(spdz_mask, location=location)(
                *args, return_value=True, return_arity=2)
            shares_delta.append(share_delta)
            shares_epsilon.append(share_epsilon)
    except EmptyCryptoPrimitiveStoreError as e:
        if sy.local_worker.crypto_store.force_preprocessing:
            raise
        crypto_provider.crypto_store.provide_primitives(workers=locations,
                                                        **e.kwargs_)
        return spdz_mul(cmd, x, y, crypto_provider, dtype, torch_dtype, field)

    delta = sum(shares_delta)
    epsilon = sum(shares_epsilon)

    for location, share_delta, share_epsilon in zip(locations, shares_delta,
                                                    shares_epsilon):
        location.de_register_obj(share_delta)
        location.de_register_obj(share_epsilon)
        del share_delta
        del share_epsilon

    if not asynchronous:
        shares = []
        for i, location in enumerate(locations):
            args = (th.LongTensor([i]), delta, epsilon, op, dtype, torch_dtype,
                    field, location)
            share = remote(spdz_compute, location=location)(*args,
                                                            return_value=False)
            shares.append(share)
    else:
        shares = asyncio.run(
            sy.local_worker.async_dispatch(
                workers=locations,
                commands=[(
                    full_name(spdz_compute),
                    None,
                    (th.LongTensor([i]), delta, epsilon, op),
                    {},
                ) for i in [0, 1]],
                return_value=False,
            ))

    shares = {loc.id: share for loc, share in zip(locations, shares)}

    response = sy.AdditiveSharingTensor(shares, **x.get_class_attributes())
    return response
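A hedged sketch of the user-facing call and the Beaver identity that spdz_mask/spdz_compute implement (setup as in the earlier sketches):

x = torch.tensor([2, 3]).share(alice, bob, crypto_provider=crypto_provider)
y = torch.tensor([4, 5]).share(alice, bob, crypto_provider=crypto_provider)
print((x * y).get())  # tensor([ 8, 15]); the multiplication runs through spdz_mul

# With a triple (a, b, c = a * b), delta = x - a and epsilon = y - b are opened, then
#   x * y = c + delta * b + epsilon * a + delta * epsilon
# which is exactly the recombination performed on each worker's shares.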
Example 7
def _pool2d(input,
            kernel_size: int = 2,
            stride: int = 2,
            padding=0,
            dilation=1,
            ceil_mode=None,
            mode="avg"):
    if isinstance(kernel_size, tuple):
        assert kernel_size[0] == kernel_size[1]
        kernel_size = kernel_size[0]
    if isinstance(stride, tuple):
        assert stride[0] == stride[1]
        stride = stride[0]

    input_fp = input
    input = input.child

    locations = input.locations

    im_reshaped_shares = {}
    params = {}
    for location in locations:
        input_share = input.child[location.id]
        im_reshaped_shares[location.id], *params[location.id] = remote(
            _pre_pool, location=location)(input_share,
                                          kernel_size,
                                          stride,
                                          padding,
                                          dilation,
                                          return_value=False,
                                          return_arity=6)

    im_reshaped = sy.AdditiveSharingTensor(im_reshaped_shares,
                                           **input.get_class_attributes())

    if mode == "max":
        # We have optimisations when the kernel is small, namely a square of size 2 or 3
        # to reduce the number of rounds and the total number of comparisons.
        # See more in Appendix C.3 of https://arxiv.org/pdf/2006.04593.pdf
        def max_half_split(tensor4d, half_size):
            """
            Split the tensor into two halves along the last dim and return their element-wise maximum
            """
            left, right = tensor4d[:, :, :, :half_size], tensor4d[:, :, :,
                                                                  half_size:]
            max_half = left + (right >= left) * (right - left)
            return max_half

        if im_reshaped.shape[-1] == 4:
            # Compute the max as a binary tree: 2 steps are needed for 4 values
            res = max_half_split(im_reshaped, 2)
            res = max_half_split(res, 1)
        elif im_reshaped.shape[-1] == 9:
            # For 9 values we need 4 steps: we process the 8 first values and then
            # compute the max with the 9th value
            res = max_half_split(im_reshaped[:, :, :, :8], 4)
            res = max_half_split(res, 2)
            left = max_half_split(res, 1)
            right = im_reshaped[:, :, :, 8:]
            res = left + (right >= left) * (right - left)
        else:
            res = im_reshaped.max(dim=-1)
    elif mode == "avg":
        res = im_reshaped.mean(dim=-1)
    else:
        raise ValueError(f"In pool2d, mode should be avg or max, not {mode}.")

    res_shares = {}
    for location in locations:
        res_share = res.child[location.id]
        res_share = remote(_post_pool, location=location)(res_share,
                                                          *params[location.id])
        res_shares[location.id] = res_share

    result_fp = sy.FixedPrecisionTensor(**input_fp.get_class_attributes()).on(
        sy.AdditiveSharingTensor(res_shares, **res.get_class_attributes()),
        wrap=False)
    return result_fp
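The max_half_split trick above computes a maximum as a binary tree of secure comparisons; a plain-torch illustration of the two rounds needed for a 2x2 kernel (standalone, no MPC involved):

import torch as th

t = th.tensor([[[[3, 7, 5, 1]]]])                    # shape (1, 1, 1, 4): one flattened 2x2 window
left, right = t[..., :2], t[..., 2:]
m = left + (right >= left).long() * (right - left)   # round 1: pairwise maxima -> [[[[5, 7]]]]
left, right = m[..., :1], m[..., 1:]
m = left + (right >= left).long() * (right - left)   # round 2: overall max -> tensor([[[[7]]]])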
Example 8
def conv2d(input,
           weight,
           bias=None,
           stride=1,
           padding=0,
           dilation=1,
           groups=1):
    """
    Overloads torch.nn.functional.conv2d to be able to use MPC on convolutional networks.
    The idea is to unroll the input and weight matrices to compute a
    matrix multiplication equivalent to the convolution.
    Args:
        input: input image
        weight: convolution kernels
        bias: optional additive bias
        stride: stride of the convolution kernels
        padding:  implicit paddings on both sides of the input.
        dilation: spacing between kernel elements
        groups: split input into groups, in_channels should be divisible by the number of groups
    Returns:
        the result of the convolution as a fixed precision tensor.
    """
    input_fp, weight_fp = input, weight

    if isinstance(input.child, FrameworkTensor) or isinstance(
            weight.child, FrameworkTensor):
        assert isinstance(input.child, FrameworkTensor)
        assert isinstance(weight.child, FrameworkTensor)
        im_reshaped, weight_reshaped, *params = _pre_conv(
            input, weight, bias, stride, padding, dilation, groups)
        if groups > 1:
            res = []
            chunks_im = torch.chunk(im_reshaped, groups, dim=2)
            chunks_weights = torch.chunk(weight_reshaped, groups, dim=0)
            for g in range(groups):
                tmp = chunks_im[g].matmul(chunks_weights[g])
                res.append(tmp)
            result = torch.cat(res, dim=2)
        else:
            result = im_reshaped.matmul(weight_reshaped)
        result = _post_conv(bias, result, *params)
        return result.wrap()

    input, weight = input.child, weight.child

    if bias is not None:
        bias = bias.child
        assert isinstance(
            bias, sy.AdditiveSharingTensor
        ), "Have you provided bias as a kwarg? If so, please remove `bias=`."

    locations = input.locations

    im_reshaped_shares = {}
    weight_reshaped_shares = {}
    params = {}
    for location in locations:
        input_share = input.child[location.id]
        weight_share = weight.child[location.id]
        bias_share = bias.child[location.id] if bias is not None else None
        (
            im_reshaped_shares[location.id],
            weight_reshaped_shares[location.id],
            *params[location.id],
        ) = remote(_pre_conv, location=location)(
            input_share,
            weight_share,
            bias_share,
            stride,
            padding,
            dilation,
            groups,
            return_value=False,
            return_arity=6,
        )

    im_reshaped = sy.FixedPrecisionTensor(
        **input_fp.get_class_attributes()).on(sy.AdditiveSharingTensor(
            im_reshaped_shares, **input.get_class_attributes()),
                                              wrap=False)
    weight_reshaped = sy.FixedPrecisionTensor(
        **weight_fp.get_class_attributes()).on(sy.AdditiveSharingTensor(
            weight_reshaped_shares, **input.get_class_attributes()),
                                               wrap=False)

    # Now that everything is set up, we can compute the convolution as a simple matmul
    if groups > 1:
        res = []
        chunks_im = torch.chunk(im_reshaped, groups, dim=2)
        chunks_weights = torch.chunk(weight_reshaped, groups, dim=0)
        for g in range(groups):
            tmp = chunks_im[g].matmul(chunks_weights[g])
            res.append(tmp)
        res_fp = torch.cat(res, dim=2)
        res = res_fp.child
    else:
        res_fp = im_reshaped.matmul(weight_reshaped)
        res = res_fp.child

    # and then we reshape the result
    res_shares = {}
    for location in locations:
        bias_share = bias.child[location.id] if bias is not None else None
        res_share = res.child[location.id]
        res_share = remote(_post_conv,
                           location=location)(bias_share, res_share,
                                              *params[location.id])
        res_shares[location.id] = res_share

    result_fp = sy.FixedPrecisionTensor(**res_fp.get_class_attributes()).on(
        sy.AdditiveSharingTensor(res_shares, **res.get_class_attributes()),
        wrap=False)
    return result_fp
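A hedged usage sketch, assuming torch.nn.functional.conv2d is hooked by PySyft so that this overload is reached (setup as in the earlier sketches):

import torch.nn.functional as F

img = torch.rand(1, 1, 8, 8).fix_precision().share(alice, bob, crypto_provider=crypto_provider)
w = torch.rand(4, 1, 3, 3).fix_precision().share(alice, bob, crypto_provider=crypto_provider)
out = F.conv2d(img, w, stride=1, padding=1)   # unrolled into a single matmul under MPC
print(out.get().float_precision().shape)      # torch.Size([1, 4, 8, 8])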
Example 9
def fss_op(x1, x2, type_op="eq"):
    """
    Define the workflow for a binary operation using Function Secret Sharing

    Currently supported operand are = & <=, respectively corresponding to
    type_op = 'eq' and 'comp'

    Args:
        x1: first AST
        x2: second AST
        type_op: type of operation to perform, should be 'eq' or 'comp'

    Returns:
        shares of the comparison
    """

    me = sy.local_worker
    locations = x1.locations

    shares = []
    for location in locations:
        args = (x1.child[location.id], x2.child[location.id])
        share = request_run_plan(me,
                                 f"#fss_{type_op}_plan_1",
                                 location,
                                 return_value=True,
                                 args=args)
        shares.append(share)

    mask_value = sum(shares) % 2**n

    shares = []
    for i, location in enumerate(locations):
        args = (th.IntTensor([i]), mask_value)
        share = request_run_plan(me,
                                 f"#fss_{type_op}_plan_2",
                                 location,
                                 return_value=False,
                                 args=args)
        shares.append(share)

    if type_op == "comp":
        prev_shares = shares
        shares = []
        for prev_share, location in zip(prev_shares, locations):
            share = request_run_plan(me,
                                     "#xor_add_1",
                                     location,
                                     return_value=True,
                                     args=(prev_share, ))
            shares.append(share)

        masked_value = shares[0] ^ shares[1]  # TODO case >2 workers ?

        shares = {}
        for i, prev_share, location in zip(range(len(locations)), prev_shares,
                                           locations):
            share = request_run_plan(
                me,
                "#xor_add_2",
                location,
                return_value=False,
                args=(th.IntTensor([i]), masked_value),
            )
            shares[location.id] = share
    else:
        shares = {loc.id: share for loc, share in zip(locations, shares)}

    response = sy.AdditiveSharingTensor(shares, **x1.get_class_attributes())
    return response