Example #1
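All examples below assume PySyft's hooked torch: sy and torch are already imported, and bob is a connected remote worker. This test checks backpropagation through the _PlusIsMinusTensor abstraction on variables sent to bob: since add is overridden to subtract, the gradient of (x - y).sum() with respect to x is [1, 1].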
    def test_plus_is_minus_backward_remote(self):
        x = sy.Variable(torch.FloatTensor([5, 6]), requires_grad=True)
        y = sy.Variable(torch.FloatTensor([3, 4]), requires_grad=True)
        x = sy._PlusIsMinusTensor().on(x)
        y = sy._PlusIsMinusTensor().on(y)
        x.send(bob)
        y.send(bob)

        z = x.add(y).sum()
        z.backward()

        # cut chain for the equality check
        x.get()
        x.child = x.child.child

        # TODO: figure out why some machines prefer one of these options
        # while others prefer the other
        try:
            # variant 1: cut the target chain before comparing
            target = sy._PlusIsMinusTensor().on(torch.FloatTensor([1, 1]))
            target.child = target.child.child
            assert torch.equal(x.grad.data, target)
        except AssertionError:
            # variant 2: compare against the uncut chain
            target = sy._PlusIsMinusTensor().on(torch.FloatTensor([1, 1]))
            assert torch.equal(x.grad.data, target)
Example #2
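Checks the local chain structure of a _PlusIsMinusTensor-wrapped Variable and verifies that its add actually subtracts: [5, 6] add [3, 4] yields [2, 2], both through the method and through torch.add.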
    def test_plus_is_minus_variable_local(self):
        x = sy.Variable(torch.FloatTensor([5, 6]))
        y = sy.Variable(torch.FloatTensor([3, 4]))
        x = sy._PlusIsMinusTensor().on(x)
        y = sy._PlusIsMinusTensor().on(y)

        display = 'Variable > _PlusIsMinusTensor > _LocalTensor\n' \
                  ' - FloatTensor > _PlusIsMinusTensor > _LocalTensor\n' \
                  ' - - Variable > _PlusIsMinusTensor > _LocalTensor\n' \
                  '   - FloatTensor > _PlusIsMinusTensor > _LocalTensor'

        assert torch_utils.chain_print(x, display=False) == display

        z = x.add(y)

        display = 'Variable > _PlusIsMinusTensor > _LocalTensor\n' \
                  ' - FloatTensor > _PlusIsMinusTensor > _LocalTensor'
        assert torch_utils.chain_print(z, display=False) == display

        # cut chain for the equality check
        z.data.child = z.data.child.child
        assert torch.equal(z.data, torch.FloatTensor([2, 2]))

        z = torch.add(x, y)

        # cut chain for the equality check
        z.data.child = z.data.child.child
        assert torch.equal(z.data, torch.FloatTensor([2, 2]))
Example #3
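Multiplies two variables on the remote worker, runs backward there, and checks the gradients both on the objects stored in bob and after fetching the variables back with get().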
    def test_remote_backprop(self):

        x = sy.Variable(torch.ones(2, 2), requires_grad=True).send(bob)
        x2 = sy.Variable(torch.ones(2, 2) * 2, requires_grad=True).send(bob)

        y = x * x2

        y.sum().backward()

        # remote grads should be correct
        assert (bob._objects[x2.child.id_at_location].child.grad.data ==
                torch.ones(2, 2)).all()
        # In particular, you can call .grad on a syft tensor, which makes .child and .grad commute
        assert (bob._objects[x2.child.id_at_location].grad.child.data ==
                torch.ones(2, 2)).all()
        assert (bob._objects[x.child.id_at_location].child.grad.data ==
                torch.ones(2, 2) * 2).all()

        assert (y.get().data == torch.ones(2, 2) * 2).all()

        assert (x.get().data == torch.ones(2, 2)).all()
        assert (x2.get().data == torch.ones(2, 2) * 2).all()

        assert (x.grad.data == torch.ones(2, 2) * 2).all()
        assert (x2.grad.data == torch.ones(2, 2)).all()
Example #4
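Exercises binary tensor and Variable methods locally (matmul, add, add_, cross, dist, dot, eq, ge), following the list in issue 1385.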
 def test_local_var_binary_methods(self):
     ''' Unit tests for methods mentioned on issue 1385
         https://github.com/OpenMined/PySyft/issues/1385'''
     x = torch.FloatTensor([1, 2, 3, 4])
     y = torch.FloatTensor([[1, 2, 3, 4]])
     z = torch.matmul(x, y.t())
     assert torch.equal(z, torch.FloatTensor([30]))
     z = torch.add(x, y)
     assert torch.equal(z, torch.FloatTensor([[2, 4, 6, 8]]))
     x = sy.Variable(torch.FloatTensor([1, 2, 3, 4, 5]))
     y = sy.Variable(torch.FloatTensor([1, 2, 3, 4, 5]))
     assert torch.equal(x.add_(y),
                        sy.Variable(torch.FloatTensor([2, 4, 6, 8, 10])))
     x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
     y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
     z = torch.cross(x, y, dim=1)
     assert torch.equal(
         z, torch.FloatTensor([[0, 0, 0], [0, 0, 0], [0, 0, 0]]))
     x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
     y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
     z = torch.dist(x, y)
     t = torch.FloatTensor([z])
     assert torch.equal(t, torch.FloatTensor([0.]))
     x = torch.FloatTensor([1, 2, 3])
     y = torch.FloatTensor([1, 2, 3])
     z = torch.dot(x, y)
     t = torch.FloatTensor([z])
     assert torch.equal(t, torch.FloatTensor([14]))
     z = torch.eq(x, y)
     assert torch.equal(z, torch.ByteTensor([1, 1, 1]))
     z = torch.ge(x, y)
     assert torch.equal(z, torch.ByteTensor([1, 1, 1]))
Example #5
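Applies a torch function that takes multiple inputs (torch.stack) to variables living on the remote worker, then fetches the stacked result.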
 def test_torch_function_with_multiple_input_on_remote_var(self):
     x = sy.Variable(torch.FloatTensor([1, 2]))
     y = sy.Variable(torch.FloatTensor([3, 4]))
     x.send(bob)
     y.send(bob)
     z = torch.stack([x, y])
     z.get()
     assert torch.equal(z, sy.Variable(torch.FloatTensor([[1, 2], [3, 4]])))
Example #6
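Applies torch.matmul to two remote variables and checks the product after fetching it back.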
 def test_torch_function_on_remote_var(self):
     x = sy.Variable(torch.FloatTensor([[1, 2], [3, 4]]))
     y = sy.Variable(torch.FloatTensor([[1, 2], [1, 2]]))
     x.send(bob)
     y.send(bob)
     z = torch.matmul(x, y)
     z.get()
     assert torch.equal(z, sy.Variable(torch.FloatTensor([[3, 6], [7, 14]])))
Example #7
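Runs the functional convolution F.conv2d on a remote variable, with the weight and bias parameters also sent to bob (F is assumed to be torch.nn.functional).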
 def test_torch_F_conv2d_on_remote_var(self):
     x = sy.Variable(torch.FloatTensor([[[[1, -1, 2], [-1, 0, 1], [1, 0, -2]]]]))
     x.send(bob)
     weight = torch.nn.Parameter(torch.FloatTensor([[[[1, -1], [-1, 1]]]]))
     bias = torch.nn.Parameter(torch.FloatTensor([0]))
     weight.send(bob)
     bias.send(bob)
     conv = F.conv2d(x, weight, bias, stride=(1, 1))
     conv.get()
     expected_conv = sy.Variable(torch.FloatTensor([[[[3, -2], [-2, -3]]]]))
     assert torch.equal(conv, expected_conv)
Example #8
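Local counterpart of the remote backward test above: backpropagation through _PlusIsMinusTensor without any worker involved, again giving a gradient of [1, 1].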
    def test_plus_is_minus_backward_local(self):
        x = sy.Variable(torch.FloatTensor([5, 6]), requires_grad=True)
        y = sy.Variable(torch.FloatTensor([3, 4]), requires_grad=True)
        x = sy._PlusIsMinusTensor().on(x)
        y = sy._PlusIsMinusTensor().on(y)
        z = x.add(y).sum()
        z.backward()

        # cut chain for the equality check
        x.grad.data.child = x.grad.data.child.child
        assert torch.equal(x.grad.data, torch.FloatTensor([1, 1]))
Example #9
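Same convolution as the F.conv2d example, but going through a torch.nn.Conv2d module whose parameters are set explicitly before the module is sent to bob.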
    def test_torch_nn_conv2d_on_remote_var(self):

        x = sy.Variable(torch.FloatTensor([[[[1, -1, 2], [-1, 0, 1], [1, 0, -2]]]]))
        x.send(bob)
        convolute = torch.nn.Conv2d(1, 1, 2, stride=1, padding=0)
        convolute.weight = torch.nn.Parameter(torch.FloatTensor([[[[1, -1], [-1, 1]]]]))
        convolute.bias = torch.nn.Parameter(torch.FloatTensor([0]))
        convolute.send(bob)
        conv = convolute(x)
        conv.get()
        expected_conv = sy.Variable(torch.FloatTensor([[[[3, -2], [-2, -3]]]]))
        assert torch.equal(conv, expected_conv)
Example #10
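Follows the pointer chains step by step when two _PlusIsMinusTensor variables are sent to bob with explicit ids, added remotely, and fetched back, checking the printed chain at each stage.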
    def test_plus_is_minus_variable_remote(self):
        x = sy.Variable(torch.FloatTensor([5, 6]))
        y = sy.Variable(torch.FloatTensor([3, 4]))
        x = sy._PlusIsMinusTensor().on(x)
        y = sy._PlusIsMinusTensor().on(y)

        id1 = random.randint(0, int(10e10))
        id2 = random.randint(0, int(10e10))
        id11 = random.randint(0, int(10e10))
        id21 = random.randint(0, int(10e10))
        x.send(bob, new_id=id1, new_data_id=id11)
        y.send(bob, new_id=id2, new_data_id=id21)

        z = x.add(y)
        display = 'Variable > _PointerTensor\n' \
                  ' - FloatTensor > _PointerTensor\n' \
                  ' - - Variable > _PointerTensor\n' \
                  '   - FloatTensor > _PointerTensor'
        assert torch_utils.chain_print(z, display=False) == display

        assert bob._objects[z.id_at_location].owner.id == 'bob'
        assert bob._objects[z.data.id_at_location].owner.id == 'bob'

        # Check chain on remote
        ptr_id = x.child.id_at_location
        display = 'Variable > _PlusIsMinusTensor > _LocalTensor\n' \
                  ' - FloatTensor > _PlusIsMinusTensor > _LocalTensor\n' \
                  ' - - Variable > _PlusIsMinusTensor > _LocalTensor\n' \
                  '   - FloatTensor > _PlusIsMinusTensor > _LocalTensor'
        assert torch_utils.chain_print(bob._objects[ptr_id].parent,
                                       display=False) == display

        # Check chain on remote
        # TODO: for now we don't reconstruct the grad chain of non-leaf variables
        # (in our case a leaf variable is one that we sent), because we don't care
        # about their gradients. If we ever do, this needs fixing.
        ptr_id = z.child.id_at_location
        display = 'Variable > _PlusIsMinusTensor > _LocalTensor\n' \
                  ' - FloatTensor > _PlusIsMinusTensor > _LocalTensor\n' \
                  ' - - Variable > _LocalTensor\n' \
                  '   - FloatTensor > _LocalTensor'
        assert torch_utils.chain_print(bob._objects[ptr_id].parent,
                                       display=False) == display

        z.get()
        # the same chain is expected after get()
        assert torch_utils.chain_print(z, display=False) == display

        # cut chain for the equality check
        z.data.child = z.data.child.child
        assert torch.equal(z.data, torch.FloatTensor([2, 2]))
Example #11
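Builds the dictionary expected when a Variable, together with its data and grad chains, is serialized. The snippet shows only the expected structure: the ids are arbitrary sample values, array is a test helper, and the assertion itself is not part of this excerpt.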
 def test_encode_variable(self):
     # Given
     data = array([1, 2], id=123).torch()
     obj = sy.Variable(data, requires_grad=False)
     obj.child = None
     expected = {
         "mode": "subscribe",
         "obj": {
             "__Variable__": {
                 "child": {
                     "___LocalTensor__": {
                         "id": 76_308_044_977,
                         "owner": "me",
                         "torch_type": "Variable",
                     }
                 },
                 "data": {
                     "__FloatTensor__": {
                         "child": {
                             "___LocalTensor__": {
                                 "id": 32_071_180_896,
                                 "owner": "me",
                                 "torch_type": "FloatTensor",
                             }
                         },
                         "data": [],
                         "torch_type": "FloatTensor",
                     }
                 },
                 "grad": {
                     "__Variable__": {
                         "child": {
                             "___LocalTensor__": {
                                 "id": 77_824_091_007,
                                 "owner": "me",
                                 "torch_type": "Variable",
                             }
                         },
                         "data": {
                             "__FloatTensor__": {
                                 "child": {
                                     "___LocalTensor__": {
                                         "id": 32_100_939_892,
                                         "owner": "me",
                                         "torch_type": "FloatTensor",
                                     }
                                 },
                                 "data": [],
                                 "torch_type": "FloatTensor",
                             }
                         },
                         "requires_grad": False,
                         "torch_type": "Variable",
                     }
                 },
                 "requires_grad": False,
                 "torch_type": "Variable",
             }
         },
     }
Example #12
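A simpler variant of the remote backward test from Example #1, without the per-machine fallback branch.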
    def test_plus_is_minus_backward_remote(self):
        x = sy.Variable(torch.FloatTensor([5, 6]), requires_grad=True)
        y = sy.Variable(torch.FloatTensor([3, 4]), requires_grad=True)
        x = sy._PlusIsMinusTensor().on(x)
        y = sy._PlusIsMinusTensor().on(y)
        x.send(bob)
        y.send(bob)

        z = x.add(y).sum()
        z.backward()

        # cut chain for the equality check
        x.get()
        x.child = x.child.child

        target = sy._PlusIsMinusTensor().on(torch.FloatTensor([1, 1]))
        assert torch.equal(x.grad.data, target)
Example #13
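Library helper that initialises a Variable's grad as an empty tensor of the matching type and sets the owner of the new grad, and of its data, to match the tensor's own owner.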
 def init_grad_(self):
     """
     Initialise grad as an empty tensor
     """
     self.grad = sy.Variable(sy.zeros(self.size()).type(type(self.data)))
     self.grad.native_set_()
     self.grad.child.owner = self.owner
     self.grad.data.child.owner = self.owner
Example #14
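Library routine that deserializes a Variable message: it rebuilds the syft child, the data chain, and the optional grad chain, de-registers the intermediate objects from the local worker, and relinks everything into one chain.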
    def deser(msg_obj, worker, acquire):
        obj_type, msg_obj = torch_utils.extract_type_and_obj(msg_obj)
        var_syft_obj = sy._SyftTensor.deser_routing(msg_obj['child'], worker,
                                                    acquire)

        if var_syft_obj.parent is not None and var_syft_obj.child is not None:
            return var_syft_obj.parent

        # Deser the var.data
        var_data_type, var_data_tensor = torch_utils.extract_type_and_obj(
            msg_obj['data'])
        if torch_utils.is_tensor(var_data_type):
            var_data = torch.guard['syft.' + var_data_type].deser(
                msg_obj['data'], worker, acquire)
            worker.hook.local_worker.de_register(var_data)
        else:
            raise TypeError('Data is not a tensor:', var_data_type)

        variable = sy.Variable(var_data,
                               requires_grad=msg_obj['requires_grad'])

        # Deser the var.grad
        if 'grad' in msg_obj:
            var_grad_type, var_grad_tensor = torch_utils.extract_type_and_obj(
                msg_obj['grad'])
            var_grad = torch.guard['syft.' + var_grad_type].deser(
                msg_obj['grad'], worker, acquire)
            worker.hook.local_worker.de_register(var_grad)
            variable.assign_grad_(var_grad)
        else:
            var_grad = None

        # TODO: find a smart way to skip registration without leaking info to the
        # local worker. This would imply overloading __init__ differently so an
        # owner can be provided for the child attribute.
        worker.hook.local_worker.de_register(variable)
        worker.hook.local_worker.de_register(variable.data)
        if variable.grad is not None:
            worker.hook.local_worker.de_register(variable.grad)
            worker.hook.local_worker.de_register(variable.grad.data)

        variable.child = var_syft_obj
        var_syft_obj.parent = variable

        # Re-assign the data, and propagate deeply
        if var_grad is None:
            torch_utils.link_var_chain_to_data_chain(variable, var_data)
        else:
            torch_utils.link_var_chain_to_data_and_grad_chains(
                variable, var_data, var_grad)

        return variable
Example #15
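Exercises unary Variable methods locally (abs, cos, ceil, cpu and their in-place variants), following the list in issue 1385.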
    def test_local_var_unary_methods(self):
        ''' Unit tests for methods mentioned on issue 1385
            https://github.com/OpenMined/PySyft/issues/1385'''

        x = sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5]))
        assert torch.equal(x.abs(), sy.Variable(torch.FloatTensor([1, 2, 3, 4, 5])))
        assert torch.equal(x.abs_(), sy.Variable(torch.FloatTensor([1, 2, 3, 4, 5])))
        x = sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5]))
        assert torch.equal(x.cos().int(), sy.Variable(torch.IntTensor(
            [0, 0, 0, 0, 0])))
        x = sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5]))
        assert torch.equal(x.cos_().int(), sy.Variable(torch.IntTensor(
            [0, 0, 0, 0, 0])))
        x = sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5]))
        assert torch.equal(x.ceil(), x)
        assert torch.equal(x.ceil_(), x)
        assert torch.equal(x.cpu(), x)
Example #16
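Applies F.relu to a remote variable and checks the result after fetching it back (F is assumed to be torch.nn.functional).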
 def test_torch_F_relu_on_remote_var(self):
     x = sy.Variable(torch.FloatTensor([[1, -1], [-1, 1]]))
     x.send(bob)
     x = F.relu(x)
     x.get()
     assert torch.equal(x, sy.Variable(torch.FloatTensor([[1, 0], [0, 1]])))
Example #17
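Applies a torch function with multiple outputs (torch.max along dimension 1) to a remote variable; only the max values are fetched and checked.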
 def test_torch_function_with_multiple_output_on_remote_var(self):
     x = sy.Variable(torch.FloatTensor([[1, 2], [4, 3], [5, 6]]))
     x.send(bob)
     y, z = torch.max(x, 1)
     y.get()
     assert torch.equal(y, sy.Variable(torch.FloatTensor([2, 4, 6])))
Example #18
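Multiplies a Parameter by a Variable locally and checks that the result equals a Variable with the expected value.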
 def test_operation_with_variable_and_parameter(self):
     x = sy.Parameter(sy.FloatTensor([1]))
     y = sy.Variable(sy.FloatTensor([1]))
     z = x * y
     assert torch.equal(z, sy.Variable(sy.FloatTensor([1])))