        This function reconstructs a PrivateTensor given its attributes in form of a tuple.

        Args:
            worker (AbstractWorker): the worker doing the deserialization
            tensor_tuple (tuple): a tuple holding the attributes of the PrivateTensor
        Returns:
            PrivateTensor: a PrivateTensor
        Examples:
            shared_tensor = detail(data)
        """
        tensor_id, allowed_users, tags, description, chain = tensor_tuple

        # Detail each msgpack-simplified attribute back into its native form
        # while constructing the tensor shell.
        tensor = PrivateTensor(
            owner=worker,
            id=syft.serde.msgpack.serde._detail(worker, tensor_id),
            tags=syft.serde.msgpack.serde._detail(worker, tags),
            description=syft.serde.msgpack.serde._detail(worker, description),
            allowed_users=syft.serde.msgpack.serde._detail(
                worker, allowed_users),
        )

        # Re-attach the wrapped child chain, if one was serialized.
        if chain is not None:
            chain = syft.serde.msgpack.serde._detail(worker, chain)
            tensor.child = chain

        return tensor


### Register the tensor with hook_args.py ###
hook_args.default_register_tensor(PrivateTensor)
            worker, proto_prec_tensor.child)

        # Rebuild the FixedPrecisionTensor from the fields of the protobuf
        # message, then re-attach the unbufferized child.
        tensor = FixedPrecisionTensor(
            owner=worker,
            id=proto_id,
            field=proto_prec_tensor.field,
            dtype=proto_prec_tensor.dtype,
            base=proto_prec_tensor.base,
            precision_fractional=proto_prec_tensor.precision_fractional,
            kappa=proto_prec_tensor.kappa,
            tags=set(proto_prec_tensor.tags),
            description=proto_prec_tensor.description,
        )
        tensor.child = child

        return tensor

    @staticmethod
    def get_protobuf_schema():
        """
        Returns the protobuf schema used for FixedPrecisionTensor.

        Returns:
            Protobuf schema for FixedPrecisionTensor.
        """
        return FixedPrecisionTensorPB


### Register the tensor with hook_args.py ###
hook_args.default_register_tensor(FixedPrecisionTensor)
        tensor_id = sy.serde.protobuf.proto.get_protobuf_id(protobuf_tensor.id)
        crypto_provider_id = sy.serde.protobuf.proto.get_protobuf_id(
            protobuf_tensor.crypto_provider_id)
        # The field size is stored in a protobuf oneof; read whichever variant is set.
        field = int(
            getattr(protobuf_tensor, protobuf_tensor.WhichOneof("field_size")))
        dtype = protobuf_tensor.dtype

        tensor = AdditiveSharingTensor(
            owner=worker,
            id=tensor_id,
            field=field,
            dtype=dtype,
            crypto_provider=worker.get_worker(crypto_provider_id),
        )

        # NOTE(review): a protobuf repeated field is never None (empty at worst),
        # so this guard is effectively always True — confirm whether it is
        # intentional or should test for emptiness instead.
        if protobuf_tensor.location_ids is not None:
            chain = {}
            # Rebuild the share map: one unbufferized share per location id,
            # paired positionally with the shares list.
            for pb_location_id, share in zip(protobuf_tensor.location_ids,
                                             protobuf_tensor.shares):
                location_id = sy.serde.protobuf.proto.get_protobuf_id(
                    pb_location_id)
                chain[location_id] = sy.serde.protobuf.serde._unbufferize(
                    worker, share)
            tensor.child = chain

        return tensor


### Register the tensor with hook_args.py ###
hook_args.default_register_tensor(AdditiveSharingTensor)
@staticmethod def detail(worker: AbstractWorker, tensor_tuple: tuple) -> "MultiPointerTensor": """ This function reconstructs a MultiPointerTensor given it's attributes in form of a tuple. Args: worker: the worker doing the deserialization tensor_tuple: a tuple holding the attributes of the MultiPointerTensor Returns: MultiPointerTensor: a MultiPointerTensor Examples: multi_pointer_tensor = detail(data) """ tensor_id, chain = tensor_tuple tensor = sy.MultiPointerTensor(owner=worker, id=sy.serde.msgpack.serde._detail( worker, tensor_id)) if chain is not None: chain = sy.serde.msgpack.serde._detail(worker, chain) tensor.child = chain return tensor ### Register the tensor with hook_args.py ### hook_args.default_register_tensor(MultiPointerTensor)
return protobuf_placeholder @staticmethod def unbufferize(worker: AbstractWorker, protobuf_placeholder: PlaceholderPB) -> "PlaceHolder": """ This function reconstructs a PlaceHolder given it's attributes in form of a Protobuf message. Args: worker: the worker doing the deserialization protobuf_placeholder: a Protobuf message holding the attributes of the PlaceHolder Returns: PlaceHolder: a PlaceHolder """ tensor_id = syft.serde.protobuf.proto.get_protobuf_id( protobuf_placeholder.id) tags = set(protobuf_placeholder.tags) description = None if bool(protobuf_placeholder.description): description = protobuf_placeholder.description return PlaceHolder(owner=worker, id=tensor_id, tags=tags, description=description) ### Register the tensor with hook_args.py ### hook_args.default_register_tensor(PlaceHolder)
        if chain is not None:
            chain = syft.serde._detail(worker, chain)
            tensor.child = chain

        return tensor

    def get_class_attributes(self):
        """Return the attributes needed to rebuild a wrapper correctly
        when returning a response."""
        return {
            "base": self.base,
            "precision_fractional": self.precision_fractional
        }


# Prime moduli available for each supported field size; the CRT representation
# keeps one residue per modulus.
_moduli_for_fields = {
    21: [3, 7],  # Still here for some small tests
    "int64": [257, 263, 269, 271, 277, 281, 283, 293],
    "int100": [1201, 1433, 1217, 1237, 1321, 1103, 1129, 1367, 1093, 1039],
    "int128": [883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977],
}

# Ring size for each field. For the small test field 21 this equals 3 * 7,
# i.e. the product of the moduli above — presumably the same relationship
# holds for the larger fields; verify before relying on it.
_sizes_for_fields = {
    21: 21,  # Still here for some small tests
    "int64": 31_801_718_393_038_504_727,
    "int100": 6_616_464_272_061_971_915_798_970_247_351,
    "int128": 403_323_543_826_671_667_708_586_382_524_878_143_061,
}

# Should we also precompute reconstruction coefficients and put them here?

### Register the tensor with hook_args.py ###
default_register_tensor(CRTPrecisionTensor)
from syft.generic.frameworks.hook import hook_args
from syft.generic.abstract.tensor import AbstractTensor


class HookedTensor(AbstractTensor):
    """Abstract tensor that exists only to be extended, never used directly.

    Tensor types inherit from HookedTensor so that they automatically pick up
    every hooked Torch method, without each of them having to be added to the
    hook.py file individually.
    """

    def __init__(self, owner=None, id=None, tags=None, description=None, verbose=False):
        """Initializes a HookedTensor.

        Args:
            owner (BaseWorker): An optional BaseWorker object to specify the worker
                on which the tensor is located.
            id (str or int): An optional string or integer id of the LargePrecisionTensor.
            tags (list): list of tags for searching.
            description (str): a description of this tensor.
        """
        super().__init__(owner=owner, id=id, tags=tags, description=description)
        self.verbose = verbose


### Register the tensor with hook_args.py ###
hook_args.default_register_tensor(HookedTensor)
def __get_shares_map(self): return self.child def __set_shares_map(self, shares_map): self.child = shares_map return self @property def shape(self): return self.retrieve_pointers()[0].shape @property def players(self): return self.__get_players() def __repr__(self): return self.__str__() def __str__(self): type_name = type(self).__name__ out = f"[" f"{type_name}]" if self.child is not None: for v in self.child.values(): out += "\n\t-> " + str(v) return out ### Register the tensor with hook_args.py ### hook_args.default_register_tensor(ReplicatedSharingTensor)
        Returns:
            PromiseTensor: a PromiseTensor
        Examples:
            shared_tensor = detail(data)
        """
        id, shape, tensor_type, plans, tags, description = tensor_tuple

        # Detail every simplified attribute back into its native form.
        id = sy.serde._detail(worker, id)
        shape = sy.serde._detail(worker, shape)
        tensor_type = sy.serde._detail(worker, tensor_type)
        plans = sy.serde._detail(worker, plans)
        tags = sy.serde._detail(worker, tags)
        description = sy.serde._detail(worker, description)

        tensor = PromiseTensor(
            owner=worker,
            id=id,
            shape=shape,
            tensor_type=tensor_type,
            plans=plans,
            tags=tags,
            description=description,
        )

        return tensor


### Register the tensor with hook_args.py ###
hook_args.default_register_tensor(PromiseTensor)
        Returns:
            A String object
        """
        # Get the contents of the tuple representing the simplified object
        simple_child, id, tags, description = simple_obj

        # NOTE(review): defined but never called below — the detailed values
        # come from sy.serde._detail instead; confirm whether this helper
        # is dead code.
        def detail_bstr(b_str):
            return str(b_str, encoding="utf-8") if b_str else None

        # It appears that all strings are converted to bytes objects
        # after deserialization, convert them back to strings
        tags = sy.serde._detail(worker, tags)
        description = sy.serde._detail(worker, description)

        # Rebuild the str child out of the simplified child (the bytes child)
        child = sy.serde._detail(worker, simple_child)

        return String(object=child, id=id, owner=worker, tags=tags, description=description)


### Register the String object with hook_args.py ###
hook_args.default_register_tensor(String)
def get_class_attributes(self): """ Specify all the attributes need to build a wrapper correctly when returning a response, """ # TODO: what we should return specific for this one? return {} @property def data(self): return self @data.setter def data(self, new_data): self.tensor = new_data.child return self @tracer(method_name="add") def add(self, other): return SyftCrypTensor(tensor=self.tensor) __add__ = add __radd__ = add @tracer(method_name="get_plain_text") def get_plain_text(self, dst=None): return SyftCrypTensor(tensor=self.tensor) ### Register the tensor with hook_args.py ### hook_args.default_register_tensor(SyftCrypTensor)
""" super().__init__(id=id, owner=owner, tags=tags, description=description) self.verbose = verbose if isinstance(numpy_tensor, list): numpy_tensor = np.array(numpy_tensor) self.child = numpy_tensor @overloaded.method def mm(self, _self, other): return _self.dot(other) @overloaded.method def transpose(self, _self, *dims): # TODO: the semantics of the .transpose() dimensions are a bit different # for Numpy than they are for PyTorch. Fix this. # Related: https://github.com/pytorch/pytorch/issues/7609 return _self.transpose(*reversed(dims)) def create_numpy_tensor(numpy_tensor): return NumpyTensor(numpy_tensor).wrap() ### Register the tensor with hook_args.py ### hook_args.default_register_tensor(NumpyTensor)