Example No. 1
    def __init__(self,
                 torch,
                 local_worker: BaseWorker = None,
                 is_client: bool = True,
                 verbose: bool = True):
        """Initializes the hook.

        Initialize the hook and define all the attributes pertaining to the
        torch hook in a special TorchAttributes class, which is added to the
        syft.torch attributes. Hence, these parameters are now conveyed by the
        syft module.
        """
        # Save the provided torch module as an attribute of the hook
        self.torch = torch
        self.framework = self.torch

        # Save the local worker as an attribute
        self.local_worker = local_worker

        if hasattr(torch, "torch_hooked"):
            logging.warning(
                "Torch was already hooked... skipping hooking process")
            self.local_worker = syft.local_worker
            return
        else:
            torch.torch_hooked = True

        # Add all the torch attributes to the syft.torch attribute
        syft.torch = TorchAttributes(torch, self)
        syft.framework = syft.torch

        self.trace = Trace()

        # Hook some torch methods so that tensors can be created directly at workers
        self._hook_worker_methods()

        if self.local_worker is None:
            # Every TorchHook instance should have a local worker which is
            # responsible for interfacing with other workers. The worker
            # interface is what allows the Torch specific code in TorchHook to
            # be agnostic to the means by which workers communicate (such as
            # peer-to-peer, sockets, through local ports, or all within the
            # same process)
            self.local_worker = VirtualWorker(hook=self,
                                              is_client_worker=is_client,
                                              id="me")
        else:
            self.local_worker.hook = self

        # Maps each hooked class to the list of method names to auto-overload
        self.to_auto_overload = {}

        # Caches the argument-hooking function built for each overloaded attribute
        self.args_hook_for_overloaded_attr = {}

        self._hook_native_tensor(torch.Tensor, TorchTensor)

        # Add all hooked tensor methods to pointer but change behaviour to have the cmd sent
        self._hook_pointer_tensor_methods(self.torch.Tensor)

        # Add all hooked tensor methods to AdditiveSharingTensor but change behaviour
        # to apply the command to all shares (when it makes sense; otherwise the method
        # is overwritten in the AdditiveSharingTensor class)
        self._hook_additive_shared_tensor_methods()

        # Add all hooked tensor methods to multi_pointer to change behavior to have the cmd
        # sent to all child pointers.
        self._hook_multi_pointer_tensor_methods(self.torch.Tensor)

        # Add all hooked tensor methods to LoggingTensor but change behaviour to just forward
        # the cmd to the next child (behaviour can be changed in the SyftTensor class file)
        self._hook_syft_tensor_methods(LoggingTensor)

        # Add all hooked tensor methods to PaillierTensor but change behaviour to just forward
        # the cmd to the next child (behaviour can be changed in the SyftTensor class file)
        self._hook_syft_tensor_methods(PaillierTensor)

        # Add all hooked tensor methods to FixedPrecisionTensor but change behaviour
        # to just forward the cmd to the next child (behaviour can be changed in the
        # SyftTensor class file)
        self._hook_syft_tensor_methods(FixedPrecisionTensor)

        # Add all hooked tensor methods to AutogradTensor but change behaviour
        # to just forward the cmd to the next child (behaviour can be changed in the
        # SyftTensor class file)
        self._hook_syft_tensor_methods(AutogradTensor)

        # Add all hooked tensor methods to PrivateTensor but change behaviour
        # to just forward the cmd to the next child (behaviour can be changed in the
        # SyftTensor class file)
        self._hook_private_tensor_methods(PrivateTensor)

        # Add all hooked tensor methods to PlaceHolder but change behaviour
        # to just forward the cmd to the next child (behaviour can be changed in the
        # SyftTensor class file)
        self._hook_syft_placeholder_methods(self.torch.Tensor, PlaceHolder)

        # Add all hooked tensor methods to AdditiveSharingTensor but change behaviour
        # to just forward the cmd to the next child (behaviour can be changed in the
        # SyftTensor class file)
        self._hook_syft_tensor_methods(AdditiveSharingTensor)

        # Add all hooked tensor methods to LargePrecisionTensor
        self._hook_syft_tensor_methods(LargePrecisionTensor)

        # Add all hooked tensor methods to HookedTensor
        self._hook_syft_tensor_methods(HookedTensor)

        # Add all built-in 'str' methods to String
        self._hook_string_methods(owner=self.local_worker)

        # Add all string methods to StringPointer
        # This method call should strictly come after the
        # call to self._hook_string_methods()
        self._hook_string_pointer_methods()

        # Hook the tensor constructor function
        self._hook_tensor()

        # Hook the Parameter methods to store tensor chains in parameters
        self._hook_parameters()

        # Hook torch functions from modules such as torch (e.g. torch.add) or
        # torch.nn.functional (e.g. relu)
        self._hook_torch_module()

        # Hook torch.nn (containing Linear and Convolution layers)
        self._hook_module()

        # Hook torch.optim (containing optim.SGD, Adam, etc)
        self._hook_optim()

        # Add the local_worker to syft so that it can be found if the hook is
        # called several times
        syft.local_worker = self.local_worker
        syft.hook = self
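
For context, a minimal usage sketch of this constructor. This is a hedged sketch, not the canonical tutorial code: it assumes the standard PySyft 0.2.x layout, where the class above is exposed as syft.TorchHook and VirtualWorker is importable from the top-level syft package.

import torch
import syft

# Hook torch once at startup; a second call is detected via torch.torch_hooked
# and skipped with a warning, as the __init__ above shows
hook = syft.TorchHook(torch)

# After hooking, plain torch tensors gain Syft methods such as .send()/.get()
alice = syft.VirtualWorker(hook=hook, id="alice")  # illustrative worker id
x = torch.tensor([1.0, 2.0]).send(alice)  # returns a pointer to the remote tensor
print(x.get())                            # tensor([1., 2.])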
Example No. 2
    def __init__(
        self,
        torch,
        local_worker: BaseWorker = None,
        is_client: bool = True,
        verbose: bool = False,
        seed=None,
    ):
        """
        Initializes the hook.

        Initialize the hook and define all the attributes pertaining to the
        torch hook in a special TorchAttributes class, which is added to the
        syft.torch attributes. Hence, these parameters are now conveyed by the
        syft module.
        """
        # Save the provided torch module as an attribute of the hook
        self.torch = torch
        self.framework = self.torch
        if seed is not None:
            syft.ID_PROVIDER.seed(seed)
        self.verbose = verbose

        # Save the local worker as an attribute
        self.local_worker = local_worker

        if hasattr(torch, "torch_hooked"):
            logging.warning("Torch was already hooked... skipping hooking process")
            self.local_worker = syft.local_worker
            return
        else:
            torch.torch_hooked = True

        # Add all the torch attributes to the syft.torch attribute
        syft.torch = TorchAttributes(torch, self)
        syft.framework = syft.torch

        """
        In Syft there is a syft.framework value that can contain only one framework.
        Ideally it should contain a list of supported frameworks.

        We do this because in Plans there is method to reduce the number of actions
        that are traced (and then sent).
        The actions that are not returning a result, changing a placeholder, inplace
        or changing the global state are eliminated from the traced list
        """
        if dependency_check.crypten_available:
            import crypten
            from syft.frameworks.crypten.crypten_attributes import CryptenAttributes

            syft.crypten = CryptenAttributes(crypten, self)

        # Hook some torch methods so that tensors can be created directly at workers
        self._hook_worker_methods()

        if self.local_worker is None:
            # Every TorchHook instance should have a local worker which is
            # responsible for interfacing with other workers. The worker
            # interface is what allows the Torch specific code in TorchHook to
            # be agnostic to the means by which workers communicate (such as
            # peer-to-peer, sockets, through local ports, or all within the
            # same process)
            self.local_worker = VirtualWorker(
                hook=self, is_client_worker=is_client, id="me", verbose=verbose
            )
        else:
            self.local_worker.hook = self

        # Set of Syft workers known to this hook; starts with the local worker
        self._syft_workers = {self.local_worker}

        # Maps each hooked class to the list of method names to auto-overload
        self.to_auto_overload = {}

        # Caches the argument-hooking function built for each overloaded attribute
        self.args_hook_for_overloaded_attr = {}

        self._hook_native_tensor(torch.Tensor, TorchTensor)

        if dependency_check.crypten_available:
            from syft.frameworks.crypten.hook.hook import crypten_to_auto_overload

            for crypten_class, method_names in crypten_to_auto_overload.items():
                self.to_auto_overload[crypten_class] = method_names
                self._hook_syft_placeholder_methods(crypten_class, PlaceHolder)

        # Add all hooked tensor methods to pointer but change behaviour to have the cmd sent
        self._hook_pointer_tensor_methods(self.torch.Tensor)

        # Add all hooked tensor methods to AdditiveSharingTensor but change behaviour
        # to apply the command to all shares (when it makes sense; otherwise the method
        # is overwritten in the AdditiveSharingTensor class)
        self._hook_additive_shared_tensor_methods()

        # Add all hooked tensor methods to multi_pointer to change behavior to have the cmd
        # sent to all child pointers.
        self._hook_multi_pointer_tensor_methods(self.torch.Tensor)

        # Add all hooked tensor methods to LoggingTensor but change behaviour to just forward
        # the cmd to the next child (behaviour can be changed in the SyftTensor class file)
        self._hook_syft_tensor_methods(LoggingTensor)

        # Add all hooked tensor methods to PaillierTensor but change behaviour to just forward
        # the cmd to the next child (behaviour can be changed in the SyftTensor class file)
        self._hook_syft_tensor_methods(PaillierTensor)

        # Add all hooked tensor methods to FixedPrecisionTensor but change behaviour
        # to just forward the cmd to the next child (behaviour can be changed in the
        # SyftTensor class file)
        self._hook_syft_tensor_methods(FixedPrecisionTensor)

        # Add all hooked tensor methods to AutogradTensor but change behaviour
        # to just forward the cmd to the next child (behaviour can be changed in the
        # SyftTensor class file)
        self._hook_syft_tensor_methods(AutogradTensor)

        # Add all hooked tensor methods to PrivateTensor but change behaviour
        # to just forward the cmd to the next child (behaviour can be changed in the
        # SyftTensor class file)
        self._hook_private_tensor_methods(PrivateTensor)

        # Add all hooked tensor methods to PlaceHolder but change behaviour
        # to just forward the cmd to the next child (behaviour can be changed in the
        # SyftTensor class file)
        self._hook_syft_placeholder_methods(self.torch.Tensor, PlaceHolder)

        # Add all hooked tensor methods to AdditiveSharingTensor but change behaviour
        # to just forward the cmd to the next child (behaviour can be changed in the
        # SyftTensor class file)
        self._hook_syft_tensor_methods(AdditiveSharingTensor)

        # Add all hooked tensor methods to HookedTensor
        self._hook_syft_tensor_methods(HookedTensor)

        # Add all built-in 'str' methods to String
        self._hook_string_methods(owner=self.local_worker)

        # Add all string methods to StringPointer
        # This method call should strictly come after the
        # call to self._hook_string_methods()
        self._hook_string_pointer_methods()

        # Hook the tensor constructor function
        self._hook_tensor()

        # Hook the Parameter methods to store tensor chains in parameters
        self._hook_parameters()

        # Hook torch functions from modules such as torch (e.g. torch.add) or
        # torch.nn.functional (e.g. relu)
        self._hook_torch_module()

        # Hook torch.nn (containing Linear and Convolution layers)
        self._hook_module()

        # Hook torch.optim (containing optim.SGD, Adam, etc)
        self._hook_optim()

        # Hook the Crypten module
        if dependency_check.crypten_available:
            from syft.frameworks.crypten.hook.hook import hook_crypten, hook_crypten_module

            hook_crypten()
            hook_crypten_module()

        # Add the local_worker to syft so that it can be found if the hook is
        # called several times
        syft.local_worker = self.local_worker
        syft.hook = self
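
And a matching sketch for this variant, exercising the extra seed and verbose parameters. Again a hedged sketch assuming PySyft 0.2.x: per the __init__ above, seed feeds syft.ID_PROVIDER, so the object ids Syft auto-generates become reproducible across runs.

import torch
import syft

# seed makes syft's auto-generated object ids deterministic;
# verbose is forwarded to the auto-created local VirtualWorker
hook = syft.TorchHook(torch, verbose=False, seed=42)

bob = syft.VirtualWorker(hook=hook, id="bob")  # illustrative worker id
ptr = torch.tensor([3.0]).send(bob)
print(ptr)  # wrapper around a PointerTensor; its ids are reproducible under seed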