Example #1
# Imports assume the PySyft 0.2.x module layout.
import syft
from syft.exceptions import RemoteObjectFoundError
from syft.generic.pointers.pointer_tensor import PointerTensor


def test_pointer_found_exception(workers):
    # Build a pointer to a remote object living on alice, owned by me.
    ptr_id = syft.ID_PROVIDER.pop()
    pointer = PointerTensor(id=ptr_id, location=workers["alice"], owner=workers["me"])

    # The exception must carry the pointer that triggered it.
    try:
        raise RemoteObjectFoundError(pointer)
    except RemoteObjectFoundError as err:
        err_pointer = err.pointer
        assert isinstance(err_pointer, PointerTensor)
        assert err_pointer.id == ptr_id
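The test above relies on a `workers` pytest fixture. A minimal sketch of such a fixture, assuming PySyft's `TorchHook`/`VirtualWorker` setup (the real fixture in PySyft's test suite registers more workers than shown here):

import pytest
import torch
import syft


@pytest.fixture
def workers():
    # Hook torch so that VirtualWorkers and pointer tensors are available.
    hook = syft.TorchHook(torch)
    return {
        "me": hook.local_worker,
        "alice": syft.VirtualWorker(hook, id="alice"),
    }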
Example #2
                        tensor = tensor.wrap()

            return tensor
        # Else we keep the same Pointer
        else:
            location = syft.hook.local_worker.get_worker(worker_id)

            ptr = PointerTensor(
                location=location,
                id_at_location=obj_id_at_location,
                owner=worker,
                id=obj_id,
                shape=shape,
                garbage_collect_data=garbage_collect_data,
            )

            return ptr

    @staticmethod
    def get_protobuf_schema() -> PointerTensorPB:
        return PointerTensorPB


### Register the tensor with hook_args.py ###
# A PointerTensor counts as a single tensor-like argument.
register_type_rule({PointerTensor: one})
# A pointer has no local data to unwrap, so the forward function raises
# RemoteObjectFoundError (via the generator .throw trick, since `raise`
# cannot appear inside a lambda) to signal that the call must be handled
# by the remote worker instead.
register_forward_func({PointerTensor: lambda p: (_ for _ in ()).throw(RemoteObjectFoundError(p))})
# On the way back, the pointer is returned unchanged.
register_backward_func({PointerTensor: lambda i: i})
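The `(_ for _ in ()).throw(...)` expression is a standard idiom for raising an exception from inside a lambda: an empty generator is created and its `throw()` method raises the given exception immediately. A self-contained sketch of the idiom (the `Found` class below is a stand-in for illustration, not a PySyft type):

class Found(Exception):
    """Stand-in exception used only for this illustration."""


# Calling .throw() on a fresh, empty generator raises the exception right away.
raise_found = lambda obj: (_ for _ in ()).throw(Found(obj))

try:
    raise_found("some pointer")
except Found as err:
    print("caught:", err.args[0])  # prints: caught: some pointer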
Example #3
            ptr = PointerTensor(
                location=location,
                id_at_location=id_at_location,
                owner=worker,
                id=obj_id,
                shape=shape,
                garbage_collect_data=garbage_collect_data,
            )

            return ptr

        # a more general but slower/more verbose option

        # new_data = {}
        # for k, v in data.items():
        #     key = k.decode()
        #     if type(v) is bytes:
        #         val_str = v.decode()
        #         val = syft.local_worker.get_worker(val_str)
        #     else:
        #         val = v
        #     new_data[key] = val
        # return PointerTensor(**new_data)


### Register the tensor with hook_args.py ###
register_type_rule({PointerTensor: one})
register_forward_func({PointerTensor: lambda p: (_ for _ in ()).throw(RemoteObjectFoundError(p))})
register_backward_func({PointerTensor: lambda i: i})
Example #4
    np.ndarray: one,
    # should perhaps be of type ShareDict extending dict or something like this
    LoggingTensor: one,
    FixedPrecisionTensor: one,
    AutogradTensor: one,
    AdditiveSharingTensor: one,
    MultiPointerTensor: one,
    PointerTensor: one,
    LargePrecisionTensor: one,
    torch.Tensor: one,
    torch.nn.Parameter: one,
}

# Dict to return the proper lambda function for the right torch or syft tensor type
forward_func = {
    PointerTensor: lambda p: (_ for _ in ()).throw(RemoteObjectFoundError(p)),
    torch.Tensor: lambda i: i.child
    if has_attr(i, "child")
    else (_ for _ in ()).throw(PureTorchTensorFoundError),
    torch.nn.Parameter: lambda i: i.child
    if has_attr(i, "child")
    else (_ for _ in ()).throw(PureTorchTensorFoundError),
    LoggingTensor: lambda i: i.child,
    FixedPrecisionTensor: lambda i: i.child,
    AutogradTensor: lambda i: i.child,
    AdditiveSharingTensor: lambda i: i.child,
    MultiPointerTensor: lambda i: i.child,
    LargePrecisionTensor: lambda i: i._internal_representation_to_large_ints(),
    "my_syft_tensor_type": lambda i: i.child,
}
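Each forward function strips one wrapper layer: syft tensors expose their underlying `.child`, plain torch tensors either expose `.child` or abort with `PureTorchTensorFoundError`, and a `PointerTensor` aborts with `RemoteObjectFoundError` because its data lives on another worker. A self-contained sketch of that unwrap-or-raise pattern (the `Wrapper`, `Pointer`, and `RemoteFound` classes and the `unwrap_args` helper are illustrative stand-ins, not PySyft's actual hook_args code):

class RemoteFound(Exception):
    """Stand-in for RemoteObjectFoundError: carries the offending pointer."""

    def __init__(self, pointer):
        self.pointer = pointer


class Wrapper:
    """Stand-in for a syft tensor wrapper that holds its data in .child."""

    def __init__(self, child):
        self.child = child


class Pointer:
    """Stand-in for PointerTensor: there is no local data to unwrap."""


forward_func = {
    Wrapper: lambda i: i.child,
    Pointer: lambda p: (_ for _ in ()).throw(RemoteFound(p)),
}


def unwrap_args(args):
    # Strip one wrapper layer from every argument, or abort as soon as a
    # pointer (i.e. remote-only data) is encountered.
    return [forward_func.get(type(a), lambda i: i)(a) for a in args]


print(unwrap_args([Wrapper(3), 4]))  # -> [3, 4]

try:
    unwrap_args([Wrapper(3), Pointer()])
except RemoteFound as err:
    # The call cannot run locally; it would have to be sent to the worker
    # that holds the data behind err.pointer.
    print("remote object found:", err.pointer)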