Example #1
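The examples below are collected from PySyft's test suite and tensor classes. They assume module-level imports and pytest fixtures roughly like the following sketch; the fixture names (`workers`, `cmd`, `shapes`, `backward_one`) come from the snippets themselves, while the hook and VirtualWorker setup is an assumption about the surrounding conftest, not part of the original code.

import torch
from torch import nn, optim

import syft

# Assumed test context (a sketch, not part of the original snippets):
# a hook plus three virtual workers, exposed to the tests as the
# `workers` fixture; `cmd`, `shapes` and `backward_one` are pytest
# parameters such as "add"/"mul", tensor shapes, and a boolean.
hook = syft.TorchHook(torch)
workers = {
    "alice": syft.VirtualWorker(hook, id="alice"),
    "bob": syft.VirtualWorker(hook, id="bob"),
    "james": syft.VirtualWorker(hook, id="james"),
}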
def test_backward_for_additive_shared_binary_cmd_with_autograd(
        workers, cmd, backward_one):
    """
    Test .backward() on Additive Shared Tensor for a single operation
    """
    bob, alice, james = workers["bob"], workers["alice"], workers["james"]

    a = (torch.tensor([[3.0, 2], [-1, 2]],
                      requires_grad=True).fix_prec().share(
                          alice, bob, crypto_provider=james))
    b = (torch.tensor([[1.0, 2], [3, 2]], requires_grad=True).fix_prec().share(
        alice, bob, crypto_provider=james))

    a = syft.AutogradTensor().on(a)
    b = syft.AutogradTensor().on(b)

    a_torch = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
    b_torch = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)

    c = getattr(a, cmd)(b)
    c_torch = getattr(a_torch, cmd)(b_torch)

    ones = torch.ones(c.shape).fix_prec().share(alice,
                                                bob,
                                                crypto_provider=james)
    ones = syft.AutogradTensor().on(ones)
    c.backward(ones if backward_one else None)
    c_torch.backward(torch.ones(c_torch.shape))

    assert (a.grad.get().float_prec() == a_torch.grad).all()
    assert (b.grad.get().float_prec() == b_torch.grad).all()
Example #2
def test_backward_for_remote_inplace_binary_cmd_with_autograd(workers, cmd):
    alice = workers["alice"]

    a = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True).send(alice)
    b = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True).send(alice)
    c = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True).send(alice)

    a = syft.AutogradTensor().on(a)
    b = syft.AutogradTensor().on(b)
    c = syft.AutogradTensor().on(c)

    a_torch = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
    b_torch = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)
    c_torch = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)

    r = a * b
    getattr(r, cmd)(c)
    r_torch = a_torch * b_torch
    getattr(r_torch, cmd)(c_torch)

    ones = torch.ones(r.shape).send(alice)
    ones = syft.AutogradTensor().on(ones)
    r.backward(ones)
    r_torch.backward(torch.ones(r_torch.shape))

    assert (a.grad.get() == a_torch.grad).all()
    assert (b.grad.get() == b_torch.grad).all()
    assert (c.grad.get() == c_torch.grad).all()
Example #3
def test_backward_for_inplace_binary_cmd_with_autograd(cmd):

    a = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
    b = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)
    c = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)

    a = syft.AutogradTensor().on(a)
    b = syft.AutogradTensor().on(b)
    c = syft.AutogradTensor().on(c)

    a_torch = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
    b_torch = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)
    c_torch = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)

    r = a * b
    getattr(r, cmd)(c)
    r_torch = a_torch * b_torch
    getattr(r_torch, cmd)(c_torch)

    ones = torch.ones(r.shape)
    ones = syft.AutogradTensor().on(ones)
    r.backward(ones)
    r_torch.backward(torch.ones(r_torch.shape))

    assert (a.child.grad == a_torch.grad).all()
    assert (b.child.grad == b_torch.grad).all()
    assert (c.child.grad == c_torch.grad).all()
Example #4
def test_backward_for_remote_binary_cmd_with_autograd(workers, cmd,
                                                      backward_one):
    """
    Test .backward() on remote tensors using explicit wrapping
    with an Autograd Tensor.
    """
    alice = workers["alice"]

    a = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True).send(alice)
    b = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True).send(alice)

    a = syft.AutogradTensor().on(a)
    b = syft.AutogradTensor().on(b)

    a_torch = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
    b_torch = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)

    c = getattr(a, cmd)(b)
    c_torch = getattr(a_torch, cmd)(b_torch)

    ones = torch.ones(c.shape).send(alice)
    ones = syft.AutogradTensor().on(ones)
    c.backward(ones if backward_one else None)
    c_torch.backward(torch.ones(c_torch.shape))

    assert (a.grad.get() == a_torch.grad).all()
    assert (b.grad.get() == b_torch.grad).all()
Example #5
def test_backward_for_binary_cmd_with_inputs_of_different_dim_and_autograd(
        cmd, shapes):
    """
    Test .backward() on local tensors wrapped in an AutogradTensor
    (Not useful in practice, but this is the most basic example)
    """
    a_shape, b_shape = shapes
    a = torch.ones(a_shape, requires_grad=True)
    b = torch.ones(b_shape, requires_grad=True)

    a = syft.AutogradTensor().on(a)
    b = syft.AutogradTensor().on(b)

    a_torch = torch.ones(a_shape, requires_grad=True)
    b_torch = torch.ones(b_shape, requires_grad=True)

    c = getattr(a, cmd)(b)
    c_torch = getattr(a_torch, cmd)(b_torch)

    ones = torch.ones(c.shape)
    ones = syft.AutogradTensor().on(ones)
    c.backward(ones)
    c_torch.backward(torch.ones(c_torch.shape))

    assert (a.child.grad == a_torch.grad).all()
    assert (b.child.grad == b_torch.grad).all()
Example #6
def test_backward_for_additive_shared_div_with_autograd(workers, backward_one):
    """
    Test .backward() on Additive Shared Tensor for division with an integer
    """
    bob, alice, james = workers["bob"], workers["alice"], workers["james"]

    a = (torch.tensor([[3.0, 2], [-1, 2]],
                      requires_grad=True).fix_prec().share(
                          alice, bob, crypto_provider=james))
    b = 2

    a = syft.AutogradTensor().on(a)

    a_torch = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
    b_torch = 2

    c = a / b
    c_torch = a_torch / b_torch

    ones = torch.ones(c.shape).fix_prec().share(alice,
                                                bob,
                                                crypto_provider=james)
    ones = syft.AutogradTensor().on(ones)
    c.backward(ones if backward_one else None)
    c_torch.backward(torch.ones(c_torch.shape))

    assert (a.grad.get().float_prec() == a_torch.grad).all()
Example #7
def test_addmm_backward_for_additive_shared_with_autograd(workers):
    """
    Test .backward() on Additive Shared Tensor for addmm
    """
    bob, alice, james = workers["bob"], workers["alice"], workers["james"]

    a = (torch.tensor([[3.0, 2], [-1, 2]],
                      requires_grad=True).fix_prec().share(
                          alice, bob, crypto_provider=james))
    b = (torch.tensor([[1.0, 2], [3, 2]], requires_grad=True).fix_prec().share(
        alice, bob, crypto_provider=james))
    c = (torch.tensor([[2.0, 2], [0, 1]], requires_grad=True).fix_prec().share(
        alice, bob, crypto_provider=james))

    a = syft.AutogradTensor().on(a)
    b = syft.AutogradTensor().on(b)
    c = syft.AutogradTensor().on(c)

    a_torch = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
    b_torch = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)
    c_torch = torch.tensor([[2.0, 2], [0, 1]], requires_grad=True)

    r = torch.addmm(c, a, b)
    r_torch = torch.addmm(c_torch, a_torch, b_torch)

    ones = torch.ones(r.shape).fix_prec().share(alice,
                                                bob,
                                                crypto_provider=james)
    ones = syft.AutogradTensor().on(ones)
    r.backward(ones)
    r_torch.backward(torch.ones(r_torch.shape))

    assert (a.grad.get().float_prec() == a_torch.grad).all()
    assert (b.grad.get().float_prec() == b_torch.grad).all()
    assert (c.grad.get().float_prec() == c_torch.grad).all()
Example #8
def test_encrypted_training_with_linear_model(workers):
    """
    Test a minimal example of encrypted training using nn.Linear
    """
    bob, alice, james = workers["bob"], workers["alice"], workers["james"]

    # A Toy Dataset
    data = (torch.tensor([[0, 0], [0, 1], [1, 0],
                          [1, 1.0]]).fix_prec().share(bob,
                                                      alice,
                                                      crypto_provider=james))
    target = (torch.tensor([[0], [0], [1],
                            [1.0]]).fix_prec().share(bob,
                                                     alice,
                                                     crypto_provider=james))

    # A Toy Model
    model = nn.Linear(2, 1).fix_precision().share(bob,
                                                  alice,
                                                  crypto_provider=james)

    data = syft.AutogradTensor().on(data)
    target = syft.AutogradTensor().on(target)
    model.weight = syft.AutogradTensor().on(model.weight)
    model.bias = syft.AutogradTensor().on(model.bias)

    def train():
        # Training Logic
        # Convert the learning rate to fixed precision
        opt = optim.SGD(params=model.parameters(), lr=0.1).fix_precision()

        for _ in range(10):

            # 1) erase previous gradients (if they exist)
            opt.zero_grad()

            # 2) make a prediction
            pred = model(data)

            # 3) calculate how much we missed
            loss = ((pred - target)**2).sum()

            # 4) figure out which weights caused us to miss
            loss.backward()

            # 5) change those weights
            opt.step()

        return loss

    loss = train()

    assert loss.child.child.child.virtual_get() < 500
Example #9
def test_backward_for_linear_model_on_additive_shared_with_autograd(workers):
    """
    Test .backward() on Additive Shared tensors with mixed operations
    """
    bob, alice, james = workers["bob"], workers["alice"], workers["james"]

    x = torch.tensor([[1.0, 2], [1.0, 2]]).fix_prec().share(
        bob, alice, crypto_provider=james)
    target = torch.tensor([[1.0], [1.0]]).fix_prec().share(
        bob, alice, crypto_provider=james)
    model = nn.Linear(2, 1)
    model.weight = nn.Parameter(torch.tensor([[-1.0, 2]]))
    model.bias = nn.Parameter(torch.tensor([-1.0]))
    model.fix_precision().share(bob, alice, crypto_provider=james)

    x = syft.AutogradTensor().on(x)
    target = syft.AutogradTensor().on(target)
    model.weight = syft.AutogradTensor().on(model.weight)
    model.bias = syft.AutogradTensor().on(model.bias)

    output = model(x)
    loss = ((output - target)**2).sum()
    one = torch.ones(loss.shape).fix_prec().share(bob,
                                                  alice,
                                                  crypto_provider=james)
    one = syft.AutogradTensor().on(one)
    loss.backward(one)

    weight_grad = model.weight.grad.get().float_precision()
    bias_grad = model.bias.grad.get().float_precision()

    x = torch.tensor([[1.0, 2], [1.0, 2]])
    target = torch.tensor([[1.0], [1.0]])
    model = nn.Linear(2, 1)
    model.weight = nn.Parameter(torch.tensor([[-1.0, 2]]))
    model.bias = nn.Parameter(torch.tensor([-1.0]))

    output = model(x)
    loss = ((output - target)**2).sum()

    one = torch.ones(loss.shape)
    loss.backward(one)
    assert (model.weight.grad == weight_grad).all()
    assert (model.bias.grad == bias_grad).all()
Example #10
def test_backward_for_remote_binary_cmd_local_autograd(workers, cmd):
    """
    Test .backward() on remote tensors using implicit wrapping
    with an Autograd Tensor.

    Distinguish the current use of:
        a = torch.tensor([[3., 2], [-1, 2]], requires_grad=True)
        a.send(alice, local_autograd=True)

    instead of the previous:
        a = torch.tensor([[3., 2], [-1, 2]], requires_grad=True).send(alice)
        a = syft.AutogradTensor().on(a)
    """
    alice = workers["alice"]

    a = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
    b = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)

    a = a.send(alice, local_autograd=True)
    b = b.send(alice, local_autograd=True)

    a_torch = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
    b_torch = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)

    c = getattr(a, cmd)(b)
    c_torch = getattr(a_torch, cmd)(b_torch)

    ones = torch.ones(c.shape).send(alice)
    ones = syft.AutogradTensor().on(ones)
    c.backward(ones)
    c_torch.backward(torch.ones(c_torch.shape))

    assert (a.grad.get() == a_torch.grad).all()
    assert (b.grad.get() == b_torch.grad).all()
Example #11
    def share(
        self,
        *owners: List[BaseWorker],
        protocol: str = "snn",
        field: Union[int, None] = None,
        dtype: Union[str, None] = None,
        crypto_provider: Union[BaseWorker, None] = None,
        requires_grad: bool = False,
        no_wrap: bool = False,
    ):
        """This is a pass through method which calls .share on the child.

        Args:
            owners (list): A list of BaseWorker objects determining who to send shares to.
            protocol (str): the crypto protocol used to perform the computations ('snn' or 'fss')
            field (int or None): The arithmetic field where the shares live.
            dtype (str or None): The dtype of the shares.
            crypto_provider (BaseWorker or None): The worker providing the crypto primitives.
            requires_grad (bool): Whether to wrap the result in an AutogradTensor to allow
                gradient computation; defaults to False.
        """
        if protocol == "falcon":
            shared_tensor = syft.ReplicatedSharingTensor(
                owner=self.owner).share_secret(self, owners)
            return shared_tensor
        if self.has_child():
            chain = self.child

            kwargs_ = ({
                "requires_grad": requires_grad
            } if isinstance(chain, syft.PointerTensor) else {})
            shared_tensor = chain.share(
                *owners,
                protocol=protocol,
                field=field,
                dtype=dtype,
                crypto_provider=crypto_provider,
                **kwargs_,
            )
        else:
            if self.type() == "torch.FloatTensor":
                raise TypeError(
                    "FloatTensor cannot be additively shared, Use fix_precision."
                )

            shared_tensor = (syft.AdditiveSharingTensor(
                protocol=protocol,
                field=field,
                dtype=dtype,
                crypto_provider=crypto_provider,
                owner=self.owner,
            ).on(self.copy(), wrap=False).share_secret(*owners))

        if requires_grad and not isinstance(shared_tensor, syft.PointerTensor):
            shared_tensor = syft.AutogradTensor().on(shared_tensor, wrap=False)

        if not no_wrap:
            shared_tensor = shared_tensor.wrap(type=self.dtype)

        return shared_tensor
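A minimal usage sketch for the share() method above, assuming the hook and virtual workers from the context sketch at the top; the tensor values are illustrative, not taken from the original code.

# Encode to fixed precision, then secret-share between alice and bob,
# with james as crypto provider; requires_grad=True adds an AutogradTensor.
x = torch.tensor([1.0, 2.0, 3.0]).fix_prec().share(
    workers["alice"], workers["bob"],
    crypto_provider=workers["james"], requires_grad=True)

y = (x + x).get().float_prec()  # reconstruct the shares and decode the result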
Example #12
    def share(
        self,
        *owners: List[BaseWorker],
        field: Union[int, None] = None,
        crypto_provider: Union[BaseWorker, None] = None,
        requires_grad: bool = False,
    ):
        """This is a pass through method which calls .share on the child.

        Args:
            owners (list): A list of BaseWorker objects determining who to send shares to.
            field (int or None): The arithmetic field where the shares live.
            crypto_provider (BaseWorker or None): The worker providing the crypto primitives.
            requires_grad (bool): Whether to wrap the result in an AutogradTensor to allow
                gradient computation; defaults to False.
        """

        shared_tensor = self
        if self.has_child():
            self.child = self.child.share(*owners,
                                          field=field,
                                          crypto_provider=crypto_provider)
        else:
            shared_tensor = (syft.AdditiveSharingTensor(
                field=field, crypto_provider=crypto_provider,
                owner=self.owner).on(self).child.init_shares(*owners).wrap())

        if requires_grad:
            shared_tensor = syft.AutogradTensor().on(shared_tensor)

        return shared_tensor
Example #13
def test_get_float_prec_on_autograd_tensor(workers):
    bob, alice, james = workers["bob"], workers["alice"], workers["james"]

    x = torch.tensor([0.1, 1.0])
    x2 = syft.AutogradTensor().on(x.fix_prec())
    assert (x2.float_precision() == x).all()

    x = torch.tensor([1, 2])
    x2 = x.share(bob, alice, crypto_provider=james)
    x2 = syft.AutogradTensor().on(x2)
    assert (x2.get() == x).all()

    x = torch.tensor([0.1, 1.0])
    x2 = x.fix_precision()
    x2 = x2.share(bob, alice, crypto_provider=james, requires_grad=True)
    assert (x2.get().float_precision() == x).all()
Example #14
    def share(
        self,
        *owners: List[BaseWorker],
        field: Union[int, None] = None,
        crypto_provider: Union[BaseWorker, None] = None,
        requires_grad: bool = False,
        no_wrap: bool = False,
    ):
        """This is a pass through method which calls .share on the child.

        Args:
            owners (list): A list of BaseWorker objects determining who to send shares to.
            field (int or None): The arithmetic field where the shares live.
            crypto_provider (BaseWorker or None): The worker providing the crypto primitives.
            requires_grad (bool): Whether to wrap the result in an AutogradTensor to allow
                gradient computation; defaults to False.
        """
        if crypto_provider is None:
            warnings.warn(
                "'crypto_provider' should not be None. Please provide a dedicated worker, which will act "
                "as a trusted third party and will provide the crypto primitives needed for Multi Party "
                "Computation.")
        elif crypto_provider.is_client_worker:
            warnings.warn("client worker cannot be a crypto_provider.")

        if self.has_child():
            chain = self.child

            kwargs = ({
                "requires_grad": requires_grad
            } if isinstance(chain, syft.PointerTensor) else {})
            shared_tensor = chain.share(*owners,
                                        field=field,
                                        crypto_provider=crypto_provider,
                                        **kwargs)
        else:
            if self.type() == "torch.FloatTensor":
                raise TypeError(
                    "FloatTensor cannot be additively shared, Use fix_precision."
                )

            shared_tensor = (syft.AdditiveSharingTensor(
                field=field, crypto_provider=crypto_provider,
                owner=self.owner).on(self.copy(),
                                     wrap=False).init_shares(*owners))

        if requires_grad and not isinstance(shared_tensor, syft.PointerTensor):
            shared_tensor = syft.AutogradTensor().on(shared_tensor, wrap=False)

        if not no_wrap:
            shared_tensor = shared_tensor.wrap()

        return shared_tensor
Example #15
def test_backward_for_linear_model_on_fix_prec_params_with_autograd():
    """
    Test .backward() on Fixed Precision parameters with mixed operations
    """
    x = torch.tensor([[1.0, 2], [1.0, 2]]).fix_prec()
    target = torch.tensor([[1.0], [1.0]]).fix_prec()
    model = nn.Linear(2, 1)
    model.weight = nn.Parameter(torch.tensor([[-1.0, 2]]))
    model.bias = nn.Parameter(torch.tensor([-1.0]))
    model.fix_precision()

    x = syft.AutogradTensor().on(x)
    target = syft.AutogradTensor().on(target)
    model.weight = syft.AutogradTensor().on(model.weight)
    model.bias = syft.AutogradTensor().on(model.bias)

    output = model(x)
    loss = ((output - target)**2).sum()
    one = torch.ones(loss.shape).fix_prec()
    one = syft.AutogradTensor().on(one)
    loss.backward(one)

    weight_grad = model.weight.grad.float_precision()
    bias_grad = model.bias.grad.float_precision()

    x = torch.tensor([[1.0, 2], [1.0, 2]])
    target = torch.tensor([[1.0], [1.0]])
    model = nn.Linear(2, 1)
    model.weight = nn.Parameter(torch.tensor([[-1.0, 2]]))
    model.bias = nn.Parameter(torch.tensor([-1.0]))

    output = model(x)
    loss = ((output - target)**2).sum()

    one = torch.ones(loss.shape)
    loss.backward(one)
    assert (model.weight.grad == weight_grad).all()
    assert (model.bias.grad == bias_grad).all()
Example #16
def test_backward_for_fix_prec_binary_cmd_with_autograd(cmd, backward_one):
    """
    Test .backward() on Fixed Precision Tensor for a single operation
    """
    a = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True).fix_prec()
    b = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True).fix_prec()

    a = syft.AutogradTensor().on(a)
    b = syft.AutogradTensor().on(b)

    a_torch = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
    b_torch = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)

    c = getattr(a, cmd)(b)
    c_torch = getattr(a_torch, cmd)(b_torch)

    ones = torch.ones(c.shape).fix_prec()
    ones = syft.AutogradTensor().on(ones)
    c.backward(ones if backward_one else None)
    c_torch.backward(torch.ones(c_torch.shape))

    assert (a.grad.float_prec() == a_torch.grad).all()
    assert (b.grad.float_prec() == b_torch.grad).all()
Example #17
def test_backward_for_binary_cmd_with_autograd(cmd, backward_one):
    """
    Test .backward() on local tensors wrapped in an AutogradTensor
    (Not useful in practice, but this is the most basic example)
    """
    a = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
    b = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)

    a = syft.AutogradTensor().on(a)
    b = syft.AutogradTensor().on(b)

    a_torch = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
    b_torch = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)

    c = getattr(a, cmd)(b)
    c_torch = getattr(a_torch, cmd)(b_torch)

    ones = torch.ones(c.shape)
    ones = syft.AutogradTensor().on(ones)
    c.backward(ones if backward_one else None)
    c_torch.backward(torch.ones(c_torch.shape))

    assert (a.child.grad == a_torch.grad).all()
    assert (b.child.grad == b_torch.grad).all()
Example #18
    def share(
        self,
        *owners: List[BaseWorker],
        field: Union[int, None] = None,
        crypto_provider: Union[BaseWorker, None] = None,
        requires_grad: bool = False,
        no_wrap: bool = False,
    ):
        """This is a pass through method which calls .share on the child.

        Args:
            owners (list): A list of BaseWorker objects determining who to send shares to.
            field (int or None): The arithmetic field where the shares live.
            crypto_provider (BaseWorker or None): The worker providing the crypto primitives.
            requires_grad (bool): Whether to wrap the result in an AutogradTensor to allow
                gradient computation; defaults to False.
        """

        if self.has_child():
            chain = self.child.copy()
            chain.owner = self.child.owner

            kwargs = (
                {"requires_grad": requires_grad} if isinstance(chain, syft.PointerTensor) else {}
            )
            shared_tensor = chain.share(
                *owners, field=field, crypto_provider=crypto_provider, **kwargs
            )
        else:
            shared_tensor = (
                syft.AdditiveSharingTensor(
                    field=field, crypto_provider=crypto_provider, owner=self.owner
                )
                .on(self.copy())
                .child.init_shares(*owners)
            )

        if not no_wrap:
            shared_tensor = shared_tensor.wrap()

        if requires_grad and not (
            shared_tensor.is_wrapper and isinstance(shared_tensor.child, syft.PointerTensor)
        ):
            shared_tensor = syft.AutogradTensor().on(shared_tensor)

        return shared_tensor
Example #19
    def share_(self, *args, **kwargs):
        """
        Allows .share() to be called as an in-place operation.
        """
        if self.has_child():
            requires_grad = kwargs.get("requires_grad", False)
            # Reset the requires_grad kwarg if the call is local
            if not isinstance(self.child, syft.PointerTensor):
                kwargs["requires_grad"] = False

            shared_tensor = self.child.share_(*args, **kwargs)

            if requires_grad and not isinstance(shared_tensor, syft.PointerTensor):
                shared_tensor = syft.AutogradTensor().on(shared_tensor, wrap=False)

            self.child = shared_tensor
            return self
        else:
            return self.share(*args, **kwargs)  # TODO change to inplace
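For comparison, a brief, hypothetical sketch of the in-place variant above, using the same assumed workers. On a tensor with a child (here a fixed-precision tensor), share_() replaces the child chain with the shared tensor instead of returning a new wrapper; this assumes the fixed-precision child forwards share_() as the snippet expects.

x = torch.tensor([1.0, 2.0, 3.0]).fix_prec()
x.share_(workers["alice"], workers["bob"],
         crypto_provider=workers["james"], requires_grad=True)
# x is still the same wrapper object; x.child now holds the (autograd-wrapped) shares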
Example #20
def test_backward_for_remote_unary_cmd_local_autograd(workers, cmd):
    """
    Test .backward() on unary methods on remote tensors using
    implicit wrapping
    """
    alice = workers["alice"]

    a = torch.tensor([0.3, 0.2, 0], requires_grad=True)
    a = a.send(alice, local_autograd=True)

    a_torch = torch.tensor([0.3, 0.2, 0], requires_grad=True)

    c = getattr(a, cmd)()
    c_torch = getattr(a_torch, cmd)()

    ones = torch.ones(c.shape).send(alice)
    ones = syft.AutogradTensor().on(ones)
    c.backward(ones)
    c_torch.backward(torch.ones_like(c_torch))

    # Compare the remote gradient (retrieved with .get()) against the local torch gradient
    assert (a.grad.get() == a_torch.grad).all()
Example #21
    def create_pointer(
        self,
        location: BaseWorker = None,
        id_at_location: (str or int) = None,
        register: bool = False,
        owner: BaseWorker = None,
        ptr_id: (str or int) = None,
        garbage_collect_data: bool = True,
        shape=None,
        local_autograd=False,
        preinitialize_grad=False,
    ) -> PointerTensor:
        """Creates a pointer to the "self" torch.Tensor object.

        This method is called on a torch.Tensor object, returning a pointer
        to that object. This method is the CORRECT way to create a pointer,
        and the parameters of this method give all possible attributes that
        a pointer can be created with.

        Args:
            location: The BaseWorker object which points to the worker on which
                this pointer's object can be found. In nearly all cases, this
                is self.owner and so this attribute can usually be left blank.
                Very rarely you may know that you are about to move the Tensor
                to another worker so you can pre-initialize the location
                attribute of the pointer to some other worker, but this is a
                rare exception.
            id_at_location: A string or integer id of the tensor being pointed
                to. Similar to location, this parameter is almost always
                self.id and so you can leave this parameter to None. The only
                exception is if you happen to know that the ID is going to be
                something different than self.id, but again this is very rare
                and most of the time, setting this means that you are probably
                doing something you shouldn't.
            register: A boolean parameter (default False) that determines
                whether to register the new pointer that gets created. This is
                set to false by default because most of the time a pointer is
                initialized in this way so that it can be sent to someone else
                (i.e., "Oh you need to point to my tensor? let me create a
                pointer and send it to you" ). Thus, when a pointer gets
                created, we want to skip being registered on the local worker
                because the pointer is about to be sent elsewhere. However, if
                you are initializing a pointer you intend to keep, then it is
                probably a good idea to register it, especially if there is any
                chance that someone else will initialize a pointer to your
                pointer.
            owner: A BaseWorker parameter to specify the worker on which the
                pointer is located. It is also where the pointer is registered
                if register is set to True.
            ptr_id: A string or integer parameter to specify the id of the pointer
                in case you wish to set it manually for any special reason.
                Otherwise, it will be set randomly.
            garbage_collect_data: If true (default), delete the remote tensor when the
                pointer is deleted.
            local_autograd: Use autograd system on the local machine instead of PyTorch's
                autograd on the workers.
            preinitialize_grad: Initialize gradient for AutogradTensors to a tensor.

        Returns:
            A torch.Tensor[PointerTensor] pointer to self. Note that this
            object will likely be wrapped by a torch.Tensor wrapper.
        """
        if owner is None:
            owner = self.owner

        if location is None:
            location = self.owner.id

        owner = self.owner.get_worker(owner)
        location = self.owner.get_worker(location)

        if id_at_location is None:
            id_at_location = self.id

        if ptr_id is None:
            if location.id != self.owner.id:
                ptr_id = self.id
            else:
                ptr_id = syft.ID_PROVIDER.pop()

        if shape is None:
            shape = self.shape

        # previous_pointer = owner.get_pointer_to(location, id_at_location)
        previous_pointer = None

        if previous_pointer is None:
            ptr = PointerTensor(
                location=location,
                id_at_location=id_at_location,
                owner=owner,
                id=ptr_id,
                garbage_collect_data=garbage_collect_data,
                shape=shape,
                tags=self.tags,
                description=self.description,
            )

        if self.requires_grad and local_autograd:
            ptr = syft.AutogradTensor(
                data=ptr.wrap(),
                preinitialize_grad=preinitialize_grad).on(ptr, wrap=False)

        return ptr
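create_pointer() is usually reached indirectly through .send(), but it can be called directly. A hypothetical illustration of the autograd-related parameters (with location left at its default, the pointer targets the tensor on its own owner):

t = torch.tensor([1.0, 2.0], requires_grad=True)

# local_autograd=True wraps the resulting PointerTensor in an AutogradTensor,
# as in the last branch of create_pointer() above.
ptr = t.create_pointer(garbage_collect_data=False, local_autograd=True)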
Example #22
    def send(
        self,
        *location,
        inplace: bool = False,
        user: object = None,
        local_autograd: bool = False,
        requires_grad: bool = False,
        preinitialize_grad: bool = False,
        no_wrap: bool = False,
        garbage_collect_data: bool = True,
    ):
        """Gets the pointer to a new remote object.

        One of the most commonly used methods in PySyft, this method serializes the object upon
        which it is called (self), sends the object to a remote worker, creates a pointer to
        the remote object, and then returns that pointer.

        Args:
            location: The BaseWorker object which you want to send this object to. Note that
                this is never actually the BaseWorker but instead a class which instantiates the
                BaseWorker abstraction.
            inplace: if true, return the same object instance, else a new wrapper
            user (object,optional): User credentials to be verified.
            local_autograd: Use autograd system on the local machine instead of PyTorch's
                autograd on the workers.
            requires_grad: Defaults to False. If True, whenever the remote value of this tensor
                has its gradient updated (for example when calling .backward()), a call is made
                to propagate the updated gradient back to the local value.
            preinitialize_grad: Initialize gradient for AutogradTensors to a tensor
            no_wrap: If True, the created pointer is returned without being wrapped in a torch.Tensor
            garbage_collect_data: argument passed down to create_pointer()

        Returns:
            A torch.Tensor[PointerTensor] pointer to self. Note that this
            object will likely be wrapped by a torch.Tensor wrapper.

        Raises:
                SendNotPermittedError: Raised if send is not permitted on this tensor.
        """

        # If you send a pointer p1, you want the pointer to pointer p2 to control
        # the garbage collection and not the remaining old p1 (here self). Because if
        # p2 is not GCed, GCing p1 shouldn't delete the remote tensor, but if you
        # want to do so, as p2 is not GCed, you can still do `del p2`.
        # This allows to chain multiple .send().send() calls.

        if len(location) == 1:

            location = location[0]

            if self.has_child() and isinstance(self.child, PointerTensor):
                self.child.garbage_collect_data = False
                if self._is_parameter():
                    self.data.child.garbage_collect_data = False

            ptr = self.owner.send(
                self,
                location,
                local_autograd=local_autograd,
                requires_grad=requires_grad,
                preinitialize_grad=preinitialize_grad,
                garbage_collect_data=garbage_collect_data,
            )

            ptr.description = self.description
            ptr.tags = self.tags

            # The last pointer should control remote GC, not the previous self.ptr
            if hasattr(self, "ptr") and self.ptr is not None:
                ptr_ = self.ptr()
                if ptr_ is not None:
                    ptr_.garbage_collect_data = False

            # we need to cache this weak reference to the pointer so that
            # if this method gets called multiple times we can simply re-use
            # the same pointer which was previously created
            self.ptr = weakref.ref(ptr)

            if self._is_parameter():
                if inplace:
                    self.is_wrapper = True
                    with torch.no_grad():
                        self.set_()
                    self.data = ptr
                    output = self
                else:
                    if no_wrap:
                        raise ValueError(
                            "Parameters can't accept no_wrap=True")
                    wrapper = torch.Tensor()
                    param_wrapper = torch.nn.Parameter(wrapper)
                    param_wrapper.is_wrapper = True
                    with torch.no_grad():
                        param_wrapper.set_()
                    param_wrapper.data = ptr
                    output = param_wrapper
            else:
                if inplace:
                    self.is_wrapper = True
                    self.set_()
                    self.child = ptr
                    return self
                else:
                    output = ptr if no_wrap else ptr.wrap()

            if self.requires_grad:
                # This is for AutogradTensor to work on MultiPointerTensors
                # With pre-initialized gradients, this should get it from AutogradTensor.grad
                if preinitialize_grad:
                    grad = output.child.grad
                else:
                    grad = output.attr("grad")

                output.grad = grad

                # Because of the way PyTorch works, .grad is prone to
                # create entirely new Python objects for the tensor, which
                # inadvertently deletes our custom attributes (like .child)
                # But, if we keep a backup reference around, PyTorch seems
                # to re-use it, which means .grad keeps the attributes we
                # want it to keep. #HackAlert
                output.backup_grad = grad

            if local_autograd:
                output = syft.AutogradTensor(
                    data=output,
                    preinitialize_grad=preinitialize_grad).on(output)

        else:

            children = []
            for loc in location:
                children.append(self.clone().send(loc, no_wrap=True))

            output = syft.MultiPointerTensor(children=children)

            if not no_wrap:
                output = output.wrap()

        return output
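Finally, a short sketch of the two send() paths most relevant here, again assuming the hypothetical workers from the context sketch at the top:

a = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)

# Implicit autograd wrapping, as used in Examples #10 and #20:
a_ptr = a.send(workers["alice"], local_autograd=True)

# Sending to several workers returns a wrapped MultiPointerTensor:
b_ptr = torch.tensor([1.0, 2.0]).send(workers["alice"], workers["bob"])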