Code Example #1
File: przs.py Project: venktesh-bolla/PySyft
from collections import defaultdict

def setup(workers):
    seed_max = 2 ** 32
    # Chain the workers in a ring: each worker is paired with its successor,
    # and the last worker wraps around to the first.
    paired_workers = list(zip(workers, workers[1:]))
    paired_workers.append((workers[-1], workers[0]))

    workers_ptr = defaultdict(dict)

    for cur_worker, next_worker in paired_workers:
        if cur_worker == syft.local_worker:
            # Generate the seed locally and send a pointer to the successor
            ptr = cur_worker.torch.randint(-seed_max, seed_max - 1, size=(1,))
            ptr_next = ptr.send(next_worker)
        else:
            # Generate the seed remotely and move a copy to the successor
            ptr = cur_worker.remote.torch.randint(-seed_max, seed_max - 1, size=(1,))
            ptr_next = ptr.copy().move(next_worker)

        workers_ptr[cur_worker]["cur_seed"] = ptr
        workers_ptr[next_worker]["prev_seed"] = ptr_next

    # Seed each worker's generators, locally or through a remote call
    for worker, seeds in workers_ptr.items():
        cur_seed = seeds["cur_seed"]
        prev_seed = seeds["prev_seed"]
        if worker == syft.local_worker:
            func = _initialize_generators
        else:
            func = remote(_initialize_generators, location=worker)
        func(cur_seed, prev_seed)
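A minimal sketch of the circular pairing used above, with plain strings standing in for workers (illustrative names, not part of the source):

workers = ["alice", "bob", "charlie"]
pairs = list(zip(workers, workers[1:])) + [(workers[-1], workers[0])]
# pairs == [("alice", "bob"), ("bob", "charlie"), ("charlie", "alice")]
# Each worker thus ends up with a "cur_seed" shared with its successor and a
# "prev_seed" received from its predecessor, so the pairwise generators can
# cancel out in the pseudo-random zero-sharing.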
Code Example #2
def gen_alpha_3of3(worker):
    # Run _generate_alpha_3of3 locally, or dispatch it to the remote worker
    if worker == syft.local_worker:
        func = _generate_alpha_3of3
    else:
        func = remote(_generate_alpha_3of3, location=worker)

    return func(worker.id)
Code Example #3
def get_random(name_generator, shape, worker):
    # Select the function to run: the local implementation, or a remote call.
    # Note it is referenced here, not called; the single call happens below.
    if worker == syft.local_worker:
        func = _get_random_tensor
    else:
        func = remote(_get_random_tensor, location=worker)

    return func(name_generator, shape, worker.id)
Code Example #4
@classmethod
def setup(cls, players):
    seed_map = cls.generate_and_share_seeds(players)
    for worker, seeds in seed_map.items():
        if worker == syft.local_worker:
            initialize_generators = _initialize_generators
        else:
            initialize_generators = remote(_initialize_generators,
                                           location=worker)
        initialize_generators(*seeds)
Code Example #5
File: test_attributes.py Project: znbdata/PySyft
def test_remote(workers, return_value):
    alice = workers["alice"]

    x = th.tensor([1.0])
    expected = my_awesome_computation(x)

    p = x.send(alice)
    args = (p,)
    results = remote(my_awesome_computation, location=alice)(
        *args, return_value=return_value, return_arity=2
    )

    if not return_value:
        results = tuple(result.get() for result in results)

    assert results == expected
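my_awesome_computation is defined elsewhere in test_attributes.py; a minimal compatible stand-in (hypothetical, any function returning two values satisfies the return_arity=2 contract) could look like:

def my_awesome_computation(x):
    # Hypothetical stand-in: returns two tensors, matching return_arity=2
    return x + 1, x * 2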
Code Example #6
File: test_attributes.py Project: znbdata/PySyft
def test_remote_wrong_arity(workers, return_value):
    """
    Identical to test_remote except the use didn't set return_arity to
    be the correct number of return values.
    Here it should be 2, not 1.
    """
    alice = workers["alice"]

    x = th.tensor([1.0])
    expected = my_awesome_computation(x)

    p = x.send(alice)
    args = (p,)
    results = remote(my_awesome_computation, location=alice)(
        *args, return_value=return_value, return_arity=1
    )

    if not return_value:
        results = tuple(result.get() for result in results)

    assert results == expected
Code Example #7
File: fss.py Project: venktesh-bolla/PySyft
def fss_op(x1, x2, op="eq"):
    """
    Define the workflow for a binary operation using Function Secret Sharing

    Currently supported operations are == and <=, corresponding respectively
    to op='eq' and op='comp'.

    Args:
        x1: first AST (AdditiveSharingTensor)
        x2: second AST (AdditiveSharingTensor)
        op: type of operation to perform, should be 'eq' or 'comp'

    Returns:
        shares of the comparison
    """
    if isinstance(x1, sy.AdditiveSharingTensor):
        locations = x1.locations
        class_attributes = x1.get_class_attributes()
    else:
        locations = x2.locations
        class_attributes = x2.get_class_attributes()

    dtype = class_attributes.get("dtype")
    asynchronous = isinstance(locations[0], WebsocketClientWorker)

    workers_args = [
        (
            x1.child[location.id]
            if isinstance(x1, sy.AdditiveSharingTensor)
            else (x1 if i == 0 else 0),
            x2.child[location.id]
            if isinstance(x2, sy.AdditiveSharingTensor)
            else (x2 if i == 0 else 0),
            op,
        )
        for i, location in enumerate(locations)
    ]

    try:
        shares = []
        for i, location in enumerate(locations):
            share = remote(mask_builder, location=location)(*workers_args[i], return_value=True)
            shares.append(share)
    except EmptyCryptoPrimitiveStoreError as e:
        if sy.local_worker.crypto_store.force_preprocessing:
            raise
        sy.local_worker.crypto_store.provide_primitives(workers=locations, **e.kwargs_)
        return fss_op(x1, x2, op)

    # async has a cost which is too expensive for this command
    # shares = asyncio.run(sy.local_worker.async_dispatch(
    #     workers=locations,
    #     commands=[
    #         (full_name(mask_builder), None, workers_args[i], {})
    #         for i in [0, 1]
    #     ],
    #     return_value=True
    # ))

    # n is a module-level constant in fss.py (the bit length of the values)
    mask_value = sum(shares) % 2 ** n

    for location, share in zip(locations, shares):
        location.de_register_obj(share)
        del share

    workers_args = [(th.IntTensor([i]), mask_value, op, dtype) for i in range(2)]
    if not asynchronous:
        shares = []
        for i, location in enumerate(locations):
            share = remote(evaluate, location=location)(*workers_args[i], return_value=False)
            shares.append(share)
    else:
        print("async")
        shares = asyncio.run(
            sy.local_worker.async_dispatch(
                workers=locations,
                commands=[(full_name(evaluate), None, workers_args[i], {}) for i in [0, 1]],
            )
        )

    shares = {loc.id: share for loc, share in zip(locations, shares)}

    response = sy.AdditiveSharingTensor(shares, **class_attributes)
    return response
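A minimal usage sketch, assuming a hook plus workers alice, bob and a crypto_provider are already set up (illustrative names, not part of the source):

x = th.tensor([1, 2, 3]).share(alice, bob, crypto_provider=crypto_provider)
y = th.tensor([2, 2, 2]).share(alice, bob, crypto_provider=crypto_provider)
# fss_op works on the underlying AdditiveSharingTensors and returns
# shares of the element-wise comparison x <= y
lte_shares = fss_op(x.child, y.child, op="comp")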
Code Example #8
File: spdz.py Project: Hhkai/syftgpu
def spdz_mul(cmd, x, y, crypto_provider, dtype, torch_dtype, field):
    """Abstractly multiplies two tensors (mul or matmul)
    Args:
        cmd: a callable of the equation to be computed (mul or matmul)
        x (AdditiveSharingTensor): the left part of the operation
        y (AdditiveSharingTensor): the right part of the operation
        crypto_provider (AbstractWorker): an AbstractWorker which is used
            to generate triples
        dtype (str): denotes the dtype of the shares, should be 'long' (default),
            'int' or 'custom'
        torch_dtype (torch.dtype): the real type of the shares, should be th.int64
            (default) or th.int32
        field (int): an integer denoting the size of the field, default is 2**64
    Returns:
        an AdditiveSharingTensor
    """

    op = cmd
    locations = x.locations
    # Experimental results don't show real improvements with asynchronous = True
    asynchronous = False  # isinstance(locations[0], WebsocketClientWorker)

    try:
        shares_delta, shares_epsilon = [], []
        for location in locations:
            args = (x.child[location.id], y.child[location.id], op, dtype,
                    torch_dtype, field, location)
            share_delta, share_epsilon = remote(spdz_mask, location=location)(
                *args, return_value=True, return_arity=2)
            shares_delta.append(share_delta)
            shares_epsilon.append(share_epsilon)
    except EmptyCryptoPrimitiveStoreError as e:
        if sy.local_worker.crypto_store.force_preprocessing:
            raise
        crypto_provider.crypto_store.provide_primitives(workers=locations,
                                                        **e.kwargs_)
        return spdz_mul(cmd, x, y, crypto_provider, dtype, torch_dtype, field)

    delta = sum(shares_delta)
    epsilon = sum(shares_epsilon)

    for location, share_delta, share_epsilon in zip(locations, shares_delta,
                                                    shares_epsilon):
        location.de_register_obj(share_delta)
        location.de_register_obj(share_epsilon)
        del share_delta
        del share_epsilon

    if not asynchronous:
        shares = []
        for i, location in enumerate(locations):
            args = (th.LongTensor([i]), delta, epsilon, op, dtype, torch_dtype,
                    field, location)
            share = remote(spdz_compute, location=location)(*args,
                                                            return_value=False)
            shares.append(share)
    else:
        shares = asyncio.run(
            sy.local_worker.async_dispatch(
                workers=locations,
                commands=[(
                    full_name(spdz_compute),
                    None,
                    (th.LongTensor([i]), delta, epsilon, op),
                    {},
                ) for i in [0, 1]],
                return_value=False,
            ))

    shares = {loc.id: share for loc, share in zip(locations, shares)}

    response = sy.AdditiveSharingTensor(shares, **x.get_class_attributes())
    return response
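A plaintext sketch of the Beaver-triple algebra that spdz_mask and spdz_compute implement across the workers: given a precomputed triple (a, b, c) with c = a * b, the product x * y is recovered from the opened values delta = x - a and epsilon = y - b:

import torch as th

a, b = th.randint(0, 100, (3,)), th.randint(0, 100, (3,))
c = a * b                                          # precomputed Beaver triple
x, y = th.tensor([2, 3, 4]), th.tensor([5, 6, 7])
delta, epsilon = x - a, y - b                      # masked values, safe to open
z = c + delta * b + a * epsilon + delta * epsilon  # algebraically equals x * y
assert th.equal(z, x * y)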
Code Example #9
File: functional.py Project: znbdata/PySyft
def _pool2d(input,
            kernel_size: int = 2,
            stride: int = 2,
            padding=0,
            dilation=1,
            ceil_mode=None,
            mode="avg"):
    if isinstance(kernel_size, tuple):
        assert kernel_size[0] == kernel_size[1]
        kernel_size = kernel_size[0]
    if isinstance(stride, tuple):
        assert stride[0] == stride[1]
        stride = stride[0]

    input_fp = input
    input = input.child

    locations = input.locations

    im_reshaped_shares = {}
    params = {}
    for location in locations:
        input_share = input.child[location.id]
        im_reshaped_shares[location.id], *params[location.id] = remote(
            _pre_pool, location=location)(input_share,
                                          kernel_size,
                                          stride,
                                          padding,
                                          dilation,
                                          return_value=False,
                                          return_arity=6)

    im_reshaped = sy.AdditiveSharingTensor(im_reshaped_shares,
                                           **input.get_class_attributes())

    if mode == "max":
        # We have optimisations when the kernel is small, namely a square of size 2 or 3,
        # to reduce the number of rounds and the total number of comparisons.
        # See Appendix C.3 of https://arxiv.org/pdf/2006.04593.pdf
        def max_half_split(tensor4d, half_size):
            """
            Split the tensor into two halves along the last dim and return
            the element-wise maximum of the two halves
            """
            left = tensor4d[:, :, :, :half_size]
            right = tensor4d[:, :, :, half_size:]
            # max(l, r) == l + (r >= l) * (r - l): one comparison per element
            max_half = left + (right >= left) * (right - left)
            return max_half

        if im_reshaped.shape[-1] == 4:
            # Compute the max as a binary tree: 2 steps are needed for 4 values
            res = max_half_split(im_reshaped, 2)
            res = max_half_split(res, 1)
        elif im_reshaped.shape[-1] == 9:
            # For 9 values we need 4 steps: we process the 8 first values and then
            # compute the max with the 9th value
            res = max_half_split(im_reshaped[:, :, :, :8], 4)
            res = max_half_split(res, 2)
            left = max_half_split(res, 1)
            right = im_reshaped[:, :, :, 8:]
            res = left + (right >= left) * (right - left)
        else:
            res = im_reshaped.max(dim=-1)
    elif mode == "avg":
        res = im_reshaped.mean(dim=-1)
    else:
        raise ValueError(f"In pool2d, mode should be avg or max, not {mode}.")

    res_shares = {}
    for location in locations:
        res_share = res.child[location.id]
        res_share = remote(_post_pool, location=location)(res_share,
                                                          *params[location.id])
        res_shares[location.id] = res_share

    result_fp = sy.FixedPrecisionTensor(**input_fp.get_class_attributes()).on(
        sy.AdditiveSharingTensor(res_shares, **res.get_class_attributes()),
        wrap=False)
    return result_fp
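A plaintext check of the identity behind max_half_split, which is what lets the secure max cost a single comparison per element (plain torch, outside any MPC context):

import torch

t = torch.randint(0, 100, (1, 1, 1, 4))
left, right = t[..., :2], t[..., 2:]
# max(l, r) == l + (r >= l) * (r - l), element-wise
max_half = left + (right >= left) * (right - left)
assert torch.equal(max_half, torch.maximum(left, right))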
Code Example #10
File: functional.py Project: znbdata/PySyft
def conv2d(input,
           weight,
           bias=None,
           stride=1,
           padding=0,
           dilation=1,
           groups=1):
    """
    Overloads torch.nn.functional.conv2d to be able to use MPC on convolutional networks.
    The idea is to unroll the input and weight matrices to compute a
    matrix multiplication equivalent to the convolution.
    Args:
        input: input image
        weight: convolution kernels
        bias: optional additive bias
        stride: stride of the convolution kernels
        padding: implicit padding on both sides of the input
        dilation: spacing between kernel elements
        groups: split input into groups, in_channels should be divisible by the number of groups
    Returns:
        the result of the convolution as a fixed precision tensor.
    """
    input_fp, weight_fp = input, weight

    if isinstance(input.child, FrameworkTensor) or isinstance(
            weight.child, FrameworkTensor):
        assert isinstance(input.child, FrameworkTensor)
        assert isinstance(weight.child, FrameworkTensor)
        im_reshaped, weight_reshaped, *params = _pre_conv(
            input, weight, bias, stride, padding, dilation, groups)
        if groups > 1:
            res = []
            chunks_im = torch.chunk(im_reshaped, groups, dim=2)
            chunks_weights = torch.chunk(weight_reshaped, groups, dim=0)
            for g in range(groups):
                tmp = chunks_im[g].matmul(chunks_weights[g])
                res.append(tmp)
            result = torch.cat(res, dim=2)
        else:
            result = im_reshaped.matmul(weight_reshaped)
        result = _post_conv(bias, result, *params)
        return result.wrap()

    input, weight = input.child, weight.child

    if bias is not None:
        bias = bias.child
        assert isinstance(
            bias, sy.AdditiveSharingTensor
        ), "Have you provided bias as a kwarg? If so, please remove `bias=`."

    locations = input.locations

    im_reshaped_shares = {}
    weight_reshaped_shares = {}
    params = {}
    for location in locations:
        input_share = input.child[location.id]
        weight_share = weight.child[location.id]
        bias_share = bias.child[location.id] if bias is not None else None
        (
            im_reshaped_shares[location.id],
            weight_reshaped_shares[location.id],
            *params[location.id],
        ) = remote(_pre_conv, location=location)(
            input_share,
            weight_share,
            bias_share,
            stride,
            padding,
            dilation,
            groups,
            return_value=False,
            return_arity=6,
        )

    im_reshaped = sy.FixedPrecisionTensor(
        **input_fp.get_class_attributes()).on(sy.AdditiveSharingTensor(
            im_reshaped_shares, **input.get_class_attributes()),
                                              wrap=False)
    weight_reshaped = sy.FixedPrecisionTensor(
        **weight_fp.get_class_attributes()).on(sy.AdditiveSharingTensor(
            weight_reshaped_shares, **input.get_class_attributes()),
                                               wrap=False)

    # Now that everything is set up, we can compute the convolution as a simple matmul
    if groups > 1:
        res = []
        chunks_im = torch.chunk(im_reshaped, groups, dim=2)
        chunks_weights = torch.chunk(weight_reshaped, groups, dim=0)
        for g in range(groups):
            tmp = chunks_im[g].matmul(chunks_weights[g])
            res.append(tmp)
        res_fp = torch.cat(res, dim=2)
        res = res_fp.child
    else:
        res_fp = im_reshaped.matmul(weight_reshaped)
        res = res_fp.child

    # and then we reshape the result
    res_shares = {}
    for location in locations:
        bias_share = bias.child[location.id] if bias is not None else None
        res_share = res.child[location.id]
        res_share = remote(_post_conv,
                           location=location)(bias_share, res_share,
                                              *params[location.id])
        res_shares[location.id] = res_share

    result_fp = sy.FixedPrecisionTensor(**res_fp.get_class_attributes()).on(
        sy.AdditiveSharingTensor(res_shares, **res.get_class_attributes()),
        wrap=False)
    return result_fp
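A plaintext sketch of the unrolling trick conv2d relies on: torch.nn.functional.unfold (im2col) turns the convolution into a single matmul, which is the operation the shares perform above:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)                 # one 3-channel 8x8 image
w = torch.randn(4, 3, 3, 3)                 # four 3x3 kernels
cols = F.unfold(x, kernel_size=3)           # (1, 3*3*3, 36) unrolled patches
out = w.view(4, -1).matmul(cols)            # matmul computes the convolution
out = out.view(1, 4, 6, 6)                  # fold back to image layout
assert torch.allclose(out, F.conv2d(x, w), atol=1e-4)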