Example #1
    def request_remote_run(self, location: "sy.workers.BaseWorker", args, kwargs) -> object:
        """Requests protocol execution.

        Send a request to execute the protocol on the remote location.

        Args:
            location: to which worker the request should be sent
            args: Arguments used as input data for the protocol.
            kwargs: Named arguments used as input data for the protocol.

        Returns:
            Execution response.

        """
        plan_name = f"plan{self.id}"
        args, _, _ = hook_args.unwrap_args_from_function(plan_name, args, {})

        # return_ids = kwargs.get("return_ids", {})
        command = ("run", self.id, args, kwargs)

        response = self.owner.send_command(
            message=command, recipient=location  # , return_ids=return_ids
        )
        response = hook_args.hook_response(plan_name, response, wrap_type=FrameworkTensor[0])
        return response
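
Every example on this page follows the same three-step pattern: unwrap the Syft wrappers from self and the arguments (hook_args.unwrap_args_from_method / unwrap_args_from_function), forward the call to the underlying child type, and re-wrap the result with hook_args.hook_response. The plain-Python sketch below only mimics the shape of that pattern with a hypothetical LogWrapper class; it does not use PySyft's real hook_args machinery.

    class LogWrapper:
        """Hypothetical wrapper that logs every method call it forwards (toy example)."""
        def __init__(self, child):
            self.child = child  # the wrapped value (a list here, a tensor in PySyft)

        def _forward(self, name, *args, **kwargs):
            # 1. "unwrap_args_from_method": strip the wrapper from self and the args
            new_self = self.child
            new_args = [a.child if isinstance(a, LogWrapper) else a for a in args]
            # 2. send the command to the child type and get the response
            print(f"log: {name}")
            response = getattr(new_self, name)(*new_args, **kwargs)
            # 3. "hook_response": put the wrapper back on top of the result
            return LogWrapper(response)

        def __add__(self, other):
            return self._forward("__add__", other)

    result = LogWrapper([1, 2]) + LogWrapper([3])
    print(result.child)  # [1, 2, 3]
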
Example #2
    def __add__(self, *args, **kwargs):
        """
        Here is the version of the add method without the decorator: as you can see
        it is much more complicated. However, you might sometimes need to specify
        some particular behaviour: so here is what to start from :)
        """

        if isinstance(args[0], th.Tensor):
            data = self.child + args[0].numpy()
            obj = PaillierTensor()
            obj.child = data
            return obj

        if isinstance(self.child, th.Tensor):
            self.child = self.child.numpy()

        # Replace all syft tensor with their child attribute
        new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
            "__add__", self, args, kwargs)

        # Send it to the appropriate class and get the response
        response = getattr(new_self, "__add__")(*new_args, **new_kwargs)

        # Put back SyftTensor on the tensors found in the response
        response = hook_args.hook_response("__add__",
                                           response,
                                           wrap_type=type(self))
        return response
Example #3
    def request_remote_run(
            self, location: AbstractWorker, args,
            kwargs) -> Union[List[PointerTensor], PointerTensor]:
        """
        Requests protocol execution.

        Send a request to execute the protocol on the remote location.

        Args:
            location: to which worker the request should be sent
            args: Arguments used as input data for the protocol.
            kwargs: Named arguments used as input data for the protocol.

        Returns:
            PointerTensor or list of PointerTensors: response from request to
                execute protocol
        """
        plan_name = f"plan{self.id}"
        args, _, _ = hook_args.unwrap_args_from_function(plan_name, args, {})

        # return_ids = kwargs.get("return_ids", {})
        command = ("run", self.id, args, kwargs)

        response = self.owner.send_command(
            message=command,
            recipient=location  # , return_ids=return_ids
        )
        response = hook_args.hook_response(plan_name,
                                           response,
                                           wrap_type=FrameworkTensor[0])
        return response
Example #4
    def handle_func_command(cls, command):
        """
        Receive an instruction for a function to be applied on a Syft Tensor.
        Replace all the LogTensors in the args with their child attribute,
        forward the command instruction to the handle_function_command of the
        type of the child attributes, get the response, and put a Syft Tensor
        back on top of all tensors found in the response.

        Args:
            command: instruction of a function command: (command name,
            <no self>, arguments[, kwargs_])

        Returns:
            the response of the function command
        """
        cmd, _, args_, kwargs_ = command

        # Check that the function has not been overwritten
        try:
            # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
            cmd = cls.rgetattr(cls, cmd)
        except AttributeError:
            pass
        if not isinstance(cmd, str):
            return cmd(*args_, **kwargs_)

        tensor = args_[0] if not isinstance(args_[0],
                                            (tuple, list)) else args_[0][0]

        # Replace all SyftTensors with their child attribute
        new_args, new_kwargs, new_type = hook_args.unwrap_args_from_function(
            cmd, args_, kwargs_)

        results = {}
        for worker, share in new_args[0].items():
            new_type = type(share)
            new_args_worker = tuple(
                AdditiveSharingTensor.select_worker(new_args, worker))

            # build the new command
            new_command = (cmd, None, new_args_worker, new_kwargs)

            # Send it to the appropriate class and get the response
            results[worker] = new_type.handle_func_command(new_command)

        # Put back AdditiveSharingTensor on the tensors found in the response
        response = hook_args.hook_response(
            cmd,
            results,
            wrap_type=cls,
            wrap_args=tensor.get_class_attributes())

        return response
Example #5
            def method_with_grad(*args, **kwargs):
                new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                    name, self, args, kwargs
                )

                result = getattr(new_self, name)(*new_args, **new_kwargs)

                # Put back SyftTensor on the tensors found in the response
                result = hook_args.hook_response(name, result, wrap_type=type(self))
                result.grad_fn = grad_fn(self, *args, **kwargs)

                return result
Example #6
    def request_run_plan(
        self,
        location: "sy.workers.BaseWorker",
        response_ids: List[Union[str, int]],
        *args,
        **kwargs,
    ) -> object:
        """Requests plan execution.

        Send a request to execute the plan on the remote location.

        Args:
            location: to which worker the request should be sent
            response_ids: where the result should be stored
            args: arguments used as input data for the plan
            kwargs: named arguments used as input data for the plan

        Returns:
            Execution response
        """
        plan_name = f"plan{self.id}"

        args = [args, response_ids]

        if location not in self._locations:
            raise RuntimeError(
                f"Requested to run a plan on {location.id} but pointer location(s) is/are",
                self._locations,
            )

        # look for the relevant id in the list of ids
        id_at_location = None
        for loc, id_at_loc in zip(self._locations, self._ids_at_location):
            if loc == location:
                id_at_location = id_at_loc
                break

        response = self.owner.send_command(
            cmd_name="run",
            target=id_at_location,
            args_=tuple(args),
            recipient=location,
            return_ids=tuple(response_ids),
        )
        response = hook_args.hook_response(plan_name,
                                           response,
                                           wrap_type=FrameworkTensor[0])
        if isinstance(response, (list, tuple)):
            for r in response:
                r.garbage_collect_data = False
        else:
            response.garbage_collect_data = False
        return response
Example #7
    def matmul(self, *args, **kwargs):
        """
        Manually hook matmul to add the truncation step that is inherent to multiplication
        in the fixed precision setting
        """

        other = args[0]

        if isinstance(other, FixedPrecisionTensor):
            assert (
                self.precision_fractional == other.precision_fractional
            ), "In matmul, all args should have the same precision_fractional"

        if isinstance(self.child, AdditiveSharingTensor) and isinstance(
                other.child, torch.Tensor):
            # If we try to matmul a FPT>(wrap)>AST with a FPT>torch.tensor,
            # we want to perform AST @ torch.tensor
            new_self = self.child
            new_args = (other, )
            new_kwargs = kwargs

        elif isinstance(other.child, AdditiveSharingTensor) and isinstance(
                self.child, torch.Tensor):
            # If we try to matmul a FPT>torch.tensor with a FPT>(wrap)>AST,
            # we swap operators so that we do the same operation as above
            new_self = other.child
            new_args = (self, )
            new_kwargs = kwargs

        else:
            # Replace all syft tensor with their child attribute
            new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                "matmul", self, args, kwargs)

        # Send it to the appropriate class and get the response
        response = getattr(new_self, "matmul")(*new_args, **new_kwargs)

        # Put back SyftTensor on the tensors found in the response
        response = hook_args.hook_response(
            "matmul",
            response,
            wrap_type=type(self),
            wrap_args=self.get_class_attributes())

        response %= self.field  # Wrap around the field
        response = response.truncate(other.precision_fractional)

        return response
Example #8
        def overloaded_native_method(self, *args, **kwargs):
            """
            Perform the hooking
            """
            if not hasattr(self, "child"):  # means that it's not a wrapper
                method = getattr(self, f"native_{method_name}")
                # Run the native function with the new args

                try:
                    response = method(*args, **kwargs)
                except BaseException as e:
                    # we can make some errors more descriptive with this method
                    raise route_method_exception(e, self, args, kwargs)

            else:  # means that there is a wrapper to remove
                try:
                    # Replace all torch tensor with their child attribute
                    new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                        method_name, self, args, kwargs
                    )
                except BaseException as e:
                    # we can make some errors more descriptive with this method
                    raise route_method_exception(e, self, args, kwargs)

                # Send the new command to the appropriate class and get the response
                method = getattr(new_self, method_name)
                response = method(*new_args, **new_kwargs)

                response.parents = (self.id, new_self.id)

                # For inplace methods, just directly return self
                if syft.framework.is_inplace_method(method_name):
                    return self

                # Put back the wrappers where needed
                response = hook_args.hook_response(
                    method_name,
                    response,
                    wrap_type=type(self),
                    new_self=self,
                    wrap_args=self.get_class_attributes(),
                )
                if args:
                    response.parents = (self, args[0])
                else:
                    response.parents = self
                response.command = method_name
            return response
Example #9
        def _hook_method_args(self, *args, **kwargs):
            # Replace all syft tensor with their child attribute
            new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                attr.__name__, self, args, kwargs)

            # Send it to the appropriate class and get the response
            response = attr(self, new_self, *new_args, **new_kwargs)

            # Put back SyftTensor on the tensors found in the response
            response = hook_args.hook_response(
                attr.__name__,
                response,
                wrap_type=type(self),
                wrap_args=self.get_class_attributes())

            return response
Example #10
    def handle_func_command(cls, command):
        """
        Receive an instruction for a function to be applied on a FixedPrecision Tensor.
        Perform some specific action (like logging) which depends on the
        instruction content, replace all the FPTensors in the args with
        their child attribute, forward the command instruction to the
        handle_function_command of the type of the child attributes, get the
        response, and put a FixedPrecision Tensor back on top of all tensors
        found in the response.
        :param command: instruction of a function command: (command name,
        <no self>, arguments[, kwargs_])
        :return: the response of the function command
        """
        cmd_name, _, args_, kwargs_ = command

        tensor = args_[0] if not isinstance(args_[0],
                                            (tuple, list)) else args_[0][0]

        # Check that the function has not been overwritten
        cmd = None
        try:
            # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
            cmd = cls.rgetattr(cls, cmd_name)
        except AttributeError:
            pass

        if cmd is not None:
            return cmd(*args_, **kwargs_)

        # Replace all FixedPrecisionTensor with their child attribute
        new_args, new_kwargs, new_type = hook_args.unwrap_args_from_function(
            cmd_name, args_, kwargs_)

        # build the new command
        new_command = (cmd_name, None, new_args, new_kwargs)

        # Send it to the appropriate class and get the response
        response = new_type.handle_func_command(new_command)

        # Put back FixedPrecisionTensor on the tensors found in the response
        response = hook_args.hook_response(
            cmd_name,
            response,
            wrap_type=cls,
            wrap_args=tensor.get_class_attributes())

        return response
Example #11
        def overloaded_syft_method(self, *args, **kwargs):
            """
            Perform the hooking
            """
            # Replace all syft tensor with their child attribute
            new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                attr, self, args, kwargs
            )

            # Send it to the appropriate class and get the response
            response = getattr(new_self, attr)(*new_args, **new_kwargs)

            # Put back SyftTensor on the tensors found in the response
            response = hook_args.hook_response(
                attr, response, wrap_type=type(self), wrap_args=self.get_class_attributes()
            )

            return response
Example #12
    def manual_add(self, *args, **kwargs):
        """
        Here is the version of the add method without the decorator: as you can see
        it is much more complicated. However, you might sometimes need to specify
        some particular behaviour: so here is what to start from :)
        """
        # Replace all syft tensor with their child attribute
        new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
            "add", self, args, kwargs)

        print("Log method manual_add")
        # Send it to the appropriate class and get the response
        response = getattr(new_self, "add")(*new_args, **new_kwargs)

        # Put back SyftTensor on the tensors found in the response
        response = hook_args.hook_response("add",
                                           response,
                                           wrap_type=type(self))
        return response
Example #13
    def handle_func_command(cls, command):
        """
        Receive an instruction for a function to be applied on a Syft Tensor.
        Replace all the LogTensors in the args with their child attribute,
        forward the command instruction to the handle_function_command of the
        type of the child attributes, get the response, and put a Syft Tensor
        back on top of all tensors found in the response.

        Args:
            command: instruction of a function command: (command name,
            <no self>, arguments[, kwargs])

        Returns:
            the response of the function command
        """
        cmd, _, args, kwargs = command

        # Check that the function has not been overwritten
        try:
            # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
            cmd = cls.rgetattr(cls, cmd)
            return cmd(*args, **kwargs)
        except AttributeError:
            pass

        # Replace all LoggingTensor with their child attribute
        new_args, new_kwargs, new_type = hook_args.unwrap_args_from_function(
            cmd, args, kwargs)

        # build the new command
        new_command = (cmd, None, new_args, new_kwargs)

        # Do a generic action depending on the call
        cls.on_function_call(new_command)

        # Send it to the appropriate class and get the response
        response = new_type.handle_func_command(new_command)

        # Put back LoggingTensor on the tensors found in the response
        response = hook_args.hook_response(cmd, response, wrap_type=cls)

        return response
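
The classmethod above shows the function-level variant of the same pattern: the command tuple (cmd, None, args, kwargs) is forwarded down the tensor chain one wrapper layer at a time, and each layer puts its own type back on top of the response on the way up. Below is a minimal stand-alone sketch of that recursion, again in plain Python with a hypothetical LogTensor class rather than PySyft's real types.

    import operator

    class LogTensor:
        """Toy stand-in for a Syft tensor type (hypothetical, for illustration only)."""
        def __init__(self, child):
            self.child = child

        @classmethod
        def handle_func_command(cls, command):
            cmd, _, args, kwargs = command
            # Replace every LogTensor in the args with its child attribute
            new_args = tuple(a.child if isinstance(a, cls) else a for a in args)
            print(f"log: {cmd.__name__}{new_args}")
            if any(isinstance(a, cls) for a in new_args):
                # Arguments are still wrapped: forward the same command one level down
                response = cls.handle_func_command((cmd, None, new_args, kwargs))
            else:
                # Reached the plain values: run the real function
                response = cmd(*new_args, **kwargs)
            # Put a LogTensor back on top of the response
            return cls(response)

    x = LogTensor(LogTensor(3))   # a two-level chain, e.g. wrapper > LogTensor > value
    y = LogTensor(LogTensor(4))
    out = LogTensor.handle_func_command((operator.add, None, (x, y), {}))
    print(out.child.child)        # 7
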
Example #14
    def execute_plan(self, args: List, result_ids: List[Union[str, int]]):
        """Controls local or remote plan execution.

        If the plan hasn't been built yet, first build it using the blueprint.
        Then if it has a remote location, send the plan to the remote location only the
        first time, request a remote plan execution with specific pointers and ids for
        storing the result, and return a pointer to the result of the execution.
        If the plan is local: update the plan with the result_ids and args ids given,
        run the plan and return the None message serialized.

        Args:
            args: Arguments used to run plan.
            result_ids: List of ids where the results will be stored.
        """
        # We build the plan only if needed
        if not self.is_built:
            self._build(args)

        if len(self.locations):
            plan_name = f"plan{self.id}"
            # args, _, _ = hook_args.unwrap_args_from_function(
            #     plan_name, args, {}
            # )

            worker = self.find_location(args)
            if worker.id not in self.ptr_plans.keys():
                self.ptr_plans[worker.id] = self._send(worker)
            response = self.request_execute_plan(worker, result_ids, *args)

            response = hook_args.hook_response(plan_name, response, wrap_type=FrameworkTensor[0])
            return response

        # if the plan is not to be sent, then it has been requested to be executed,
        # so we update the plan with the correct input and output ids and we run it
        elif not len(self.locations):
            self._update_args(args, result_ids)
            self._execute_plan()
            responses = self._get_plan_output(result_ids)
            return responses

        return sy.serde.serialize(None)
Example #15
        def overloaded_attr(self, *args, **kwargs):
            """
            Perform the hooking
            """

            # Replace all syft tensor with their child attribute
            new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                attr, self, args, kwargs
            )

            results = {}
            for k, v in new_self.items():
                results[k] = v.__getattribute__(attr)(*dispatch(new_args, k), **new_kwargs)

            # Put back MultiPointerTensor on the tensors found in the response
            response = hook_args.hook_response(
                attr, results, wrap_type=MultiPointerTensor, wrap_args=self.get_class_attributes()
            )

            return response
Example #16
        def _hook_function_args(*args, **kwargs):

            # TODO have a better way to infer the type of tensor -> this implies
            # that the first argument is a tensor (even if this is the case > 99% of the time)
            tensor = args[0] if not isinstance(args[0], tuple) else args[0][0]
            cls = type(tensor)

            # Replace all syft tensor with their child attribute
            new_args, new_kwargs, new_type = hook_args.unwrap_args_from_function(
                attr.__name__, args, kwargs)

            # Send it to the appropriate class and get the response
            response = attr(*new_args, **new_kwargs)

            # Put back SyftTensor on the tensors found in the response
            response = hook_args.hook_response(
                attr.__name__,
                response,
                wrap_type=cls,
                wrap_args=tensor.get_class_attributes())

            return response
Example #17
    def handle_func_command(cls, command):
        """
        Receive an instruction for a function to be applied on an AutogradTensor.
        Perform some specific action (like logging) which depends on the
        instruction content, replace all the AutogradTensors in the args with
        their child attribute, forward the command instruction to the
        handle_function_command of the type of the child attributes, get the
        response, and put an AutogradTensor back on top of all tensors found in
        the response.
        :param command: instruction of a function command: (command name,
        <no self>, arguments[, kwargs_])
        :return: the response of the function command
        """

        cmd, _, args_, kwargs_ = command

        # Check that the function has not been overwritten
        try:
            # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
            cmd = cls.rgetattr(cls, cmd)
            return cmd(*args_, **kwargs_)
        except AttributeError:
            pass

        # Replace all AutogradTensor with their child attribute
        new_args, new_kwargs, new_type = hook_args.unwrap_args_from_function(
            cmd, args_, kwargs_)

        # build the new command
        new_command = (cmd, None, new_args, new_kwargs)

        # Send it to the appropriate class and get the response
        response = new_type.handle_func_command(new_command)

        # Put back AutogradTensor on the tensors found in the response
        response = hook_args.hook_response(cmd, response, wrap_type=cls)

        return response
Example #18
    def request_execute_plan(
        self,
        location: "sy.workers.BaseWorker",
        response_ids: List[Union[str, int]],
        *args,
        **kwargs,
    ) -> object:
        """Requests plan execution.

        Send a request to execute the plan on the remote location.

        Args:
            location: to which worker the request should be sent
            response_ids: where the result should be stored
            args: arguments used as input data for the plan
            kwargs: named arguments used as input data for the plan

        Returns:
            Execution response
        """
        plan_name = f"plan{self.id}"
        # args, _, _ = hook_args.unwrap_args_from_function(
        #     plan_name, args, {}
        # )
        args = [args, response_ids]

        command = ("execute_plan", self.id_at_location, args, kwargs)

        response = self.owner.send_command(message=command,
                                           recipient=location,
                                           return_ids=response_ids)
        response = hook_args.hook_response(plan_name,
                                           response,
                                           wrap_type=FrameworkTensor[0])
        response.garbage_collect_data = False
        return response
Example #19
    def handle_func_command(cls, command):
        """
        Operates as a router for functions. A function call always starts
        by being handled here, and 3 scenarios must be considered:

        Real TensorFlow tensor:
            The arguments of the function are real tensors so we should
            run the native TensorFlow command

        TensorFlow wrapper:
            The arguments are just wrappers at the top of a chain
            (ex: wrapper>LoggingTensor>TensorFlow tensor), so just forward
            the instruction to the next layer type in the chain (in
            the example above to LoggingTensor.handle_func_command),
            get the response and replace a wrapper on top of all tensors
            found in the response.

        Syft Tensor:
            The arguments are syft tensors of the same type: this can happen
            if, at any node of the chain where some function is forwarded,
            handle_func_command modifies the function and makes a new
            call but keeps the arguments "un-wrapped". Making a new call
            means that by default the command is treated here in the
            global router.

        :param command: instruction of a function command: (command name,
        <no self>, arguments[, kwargs])
        :return: the response of the function command
        """
        cmd, _, args, kwargs = command

        try:  # will work if tensors are wrappers

            # Replace all TensorFlow tensor with their child attribute
            # Note that we also return args_type, which helps handle case 3 in the docstring
            new_args, new_kwargs, new_type, args_type = hook_args.unwrap_args_from_function(
                cmd, args, kwargs, return_args_type=True)
            # This handles case 3: it redirects the command to the appropriate class depending
            # on the syft type of the arguments and returns
            if args_type not in FrameworkTensor:
                return args_type.handle_func_command(command)

            # build the new command
            new_command = (cmd, None, new_args, new_kwargs)
            # Send it to the appropriate class and get the response
            response = new_type.handle_func_command(new_command)
            # Put back the wrappers where needed
            response = hook_args.hook_response(cmd,
                                               response,
                                               wrap_type=args_type)
        except PureFrameworkTensorFoundError:  # means that it's not a wrapper but a pure tensor

            # Check that the function has not been overwritten
            try:
                # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
                command = cls.rgetattr(cls, cmd)
                return command(*args, **kwargs)
            except AttributeError:
                pass

            # TODO: clean this line
            cmd_split = cmd.split(".")
            cmd_path = cmd_split[:-1]
            cmd_name = cmd_split[-1]
            cmd = "syft.local_worker.hook." + ".".join(
                cmd_path) + ".native_" + cmd_name

            # Run the native function with the new args
            # Note that the cmd should already be checked upon reception by the worker
            # in the execute_command function
            if isinstance(args, tuple):
                response = eval(cmd)(*args, **kwargs)
            else:
                response = eval(cmd)(args, **kwargs)

        return response
Example #20
    def handle_func_command(cls, command):
        """
        Operates as a router for functions. A function call always starts
        by being handled here, and 3 scenarios must be considered:
        Real Torch tensor:
            The arguments of the function are real tensors so we should
            run the native torch command
        Torch wrapper:
            The arguments are just wrappers at the top of a chain
            (ex: wrapper>LoggingTensor>Torch tensor), so just forward
            the instruction to the next layer type in the chain (in
            the example above to LoggingTensor.handle_func_command),
            get the response and replace a wrapper on top of all tensors
            found in the response.
        Syft Tensor:
            The arguments are syft tensors of the same type: this can happen
            if, at any node of the chain where some function is forwarded,
            handle_func_command modifies the function and makes a new
            call but keeps the arguments "un-wrapped". Making a new call
            means that by default the command is treated here in the
            global router.
        :param command: instruction of a function command: (command name,
        <no self>, arguments[, kwargs_])
        :return: the response of the function command
        """
        cmd, _, args_, kwargs_ = command

        try:  # will work if tensors are wrappers
            # Replace all torch tensor with their child attribute
            # Note that we also return args_type, which helps handle case 3 in the docstring
            new_args, new_kwargs, new_type, args_type = hook_args.unwrap_args_from_function(
                cmd, args_, kwargs_, return_args_type=True)
            # This handles case 3: it redirects the command to the appropriate class depending
            # on the syft type of the arguments and returns
            if args_type not in FrameworkTensor:
                return args_type.handle_func_command(command)
            # build the new command
            new_command = (cmd, None, new_args, new_kwargs)

            # Check that the function has not been overwritten
            try:
                # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
                command = cls.rgetattr(cls, cmd)
                return command(*args_, **kwargs_)
            except AttributeError:
                pass

            # Send it to the appropriate class and get the response
            try:
                response = new_type.handle_func_command(new_command)
            except RuntimeError:
                # Change the library path to avoid errors on layers like AvgPooling
                list_new_command = list(new_command)
                list_new_command[0] = cls._fix_torch_library(new_command[0])
                new_command = tuple(list_new_command)
                response = new_type.handle_func_command(new_command)

            # Put back the wrappers where needed
            response = hook_args.hook_response(cmd,
                                               response,
                                               wrap_type=args_type)
        except PureFrameworkTensorFoundError:  # means that it's not a wrapper but a pure tensor

            # Check that the function has not been overwritten
            try:
                # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
                command = cls.rgetattr(cls, f"native_{cmd}")
                return command(*args_, **kwargs_)
            except AttributeError:
                pass

            # Run the native function with the new args
            # Note that the cmd should already be checked upon reception by the worker
            # in the execute_command function
            try:
                response = cls._get_response(cmd, args_, kwargs_)
            except AttributeError:
                # Change the library path to avoid errors on layers like AvgPooling
                cmd = cls._fix_torch_library(cmd)
                response = cls._get_response(cmd, args_, kwargs_)

        return response
Example #21
    def mul_and_div(self, other, cmd):
        """
        Manually hook mul and div to add the truncation/rescaling step
        which is inherent to these operations in the fixed precision setting
        """
        changed_sign = False
        if isinstance(other, FixedPrecisionTensor):
            assert (
                self.precision_fractional == other.precision_fractional
            ), "In mul and div, all args should have the same precision_fractional"
            assert self.base == other.base, "In mul and div, all args should have the same base"

        if isinstance(other, (int, torch.Tensor, AdditiveSharingTensor)):
            new_self = self.child
            new_other = other
        elif isinstance(other, float):
            raise NotImplementedError(
                "Can't multiply or divide a FixedPrecisionTensor with a float value"
            )

        elif isinstance(
                self.child,
            (AdditiveSharingTensor, MultiPointerTensor)) and isinstance(
                other.child, torch.Tensor):
            # If operands are FPT>AST and FPT>torch.tensor,
            # we want to perform the operation on AST and torch.tensor
            if cmd == "mul":
                new_self = self.child
            elif cmd == "div":
                new_self = self.child * self.base**self.precision_fractional
            new_other = other

        elif isinstance(
                other.child,
            (AdditiveSharingTensor, MultiPointerTensor)) and isinstance(
                self.child, torch.Tensor):
            # If operands are FPT>torch.tensor and FPT>AST,
            # we swap operators so that we do the same operation as above
            if cmd == "mul":
                new_self = other.child
                new_other = self
            elif cmd == "div":
                # TODO how to divide by AST?
                raise NotImplementedError(
                    "Division of a FixedPrecisionTensor by an AdditiveSharingTensor not implemented"
                )

        elif (cmd == "mul"
              and isinstance(self.child,
                             (AdditiveSharingTensor, MultiPointerTensor))
              and isinstance(other.child,
                             (AdditiveSharingTensor, MultiPointerTensor))):
            # If both operands wrap an AdditiveSharingTensor (or MultiPointerTensor),
            # just unwrap them and let the shared tensors handle the multiplication
            new_self, new_other, _ = hook_args.unwrap_args_from_method(
                "mul", self, other, None)

        else:
            # Replace all syft tensor with their child attribute
            new_self, new_other, _ = hook_args.unwrap_args_from_method(
                cmd, self, other, None)

            # To avoid problems with negative numbers
            # we take absolute value of the operands
            # The problems could be 1) bad truncation for multiplication
            # 2) overflow when scaling self in division

            # sgn_self is 1 when new_self is positive else it's 0
            # The comparison is different if new_self is a torch tensor or an AST
            sgn_self = (new_self > 0).type(self.torch_dtype)
            pos_self = new_self * sgn_self
            neg_self = new_self * (sgn_self - 1)
            new_self = neg_self + pos_self

            # sgn_other is 1 when new_other is positive else it's 0
            # The comparison is different if new_other is a torch tensor or an AST
            sgn_other = (new_other > 0).type(self.torch_dtype)
            pos_other = new_other * sgn_other
            neg_other = new_other * (sgn_other - 1)
            new_other = neg_other + pos_other

            # If both have the same sign, sgn is 1 else it's 0
            # To be able to write sgn = 1 - (sgn_self - sgn_other) ** 2,
            # we would need to overload the __add__ for operators int and AST.
            sgn = -((sgn_self - sgn_other)**2) + 1
            changed_sign = True

            if cmd == "div":
                new_self *= self.base**self.precision_fractional
        # Send it to the appropriate class and get the response
        response = getattr(new_self, cmd)(new_other)
        # Put back SyftTensor on the tensors found in the response
        response = hook_args.hook_response(
            cmd,
            response,
            wrap_type=type(self),
            wrap_args=self.get_class_attributes())
        if not isinstance(other, (int, torch.Tensor, AdditiveSharingTensor)):
            if cmd == "mul":
                # If operation is mul, we need to truncate
                response = response.truncate(self.precision_fractional,
                                             check_sign=False)

            if changed_sign:
                # Give back its sign to response
                pos_res = response * sgn
                neg_res = response * (sgn - 1)
                response = neg_res + pos_res

        return response
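
The sign handling in the final else branch above exists because truncation (integer division by base ** precision_fractional) is only valid for non-negative field elements: a negative fixed-point value is stored as field - |value|, and dividing that large element directly produces garbage. The stand-alone demo below reproduces the problem and the same sign trick with plain Python integers; the base, precision and field size are illustrative, not PySyft's defaults.

    Q = 2 ** 62            # illustrative field size
    BASE, PREC = 10, 2
    SCALE = BASE ** PREC   # 100

    def encode(x):
        return round(x * SCALE) % Q

    def decode(v):
        return (v if v < Q // 2 else v - Q) / SCALE

    a, b = encode(-1.5), encode(2.0)      # a is stored as Q - 150

    naive = ((a * b) % Q) // SCALE        # truncate without handling the sign
    print(decode(naive))                  # garbage: we divided a huge field element

    # Sign-aware version, mirroring mul_and_div: multiply |a| * |b|, truncate, restore sign
    sgn_a = 1 if a < Q // 2 else 0        # 1 when a encodes a non-negative value
    sgn_b = 1 if b < Q // 2 else 0
    abs_a = a if sgn_a else Q - a
    abs_b = b if sgn_b else Q - b
    sgn = 1 - (sgn_a - sgn_b) ** 2        # 1 when both signs match, 0 otherwise
    res = (abs_a * abs_b) // SCALE
    res = res if sgn else Q - res
    print(decode(res))                    # -3.0
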
Example #22
        def overloaded_native_method(self, *args, **kwargs):
            """
            Perform the hooking
            """

            if not hasattr(self, "child"):  # means that it's not a wrapper

                # if self is a natural tensor but the first argument isn't,
                # wrap self with the appropriate type and re-run
                if len(args) > 0 and hasattr(args[0], "child"):

                    # if we allow this for PointerTensors it opens the potential
                    # that we could accidentally serialize and send a tensor in the
                    # arguments
                    if not isinstance(args[0].child, PointerTensor):
                        self = type(args[0].child)().on(self, wrap=True)
                        args = [args[0]]
                        return overloaded_native_method(self, *args, **kwargs)

                method = getattr(self, f"native_{method_name}")
                # Run the native function with the new args

                try:
                    response = method(*args, **kwargs)

                except BaseException as e:
                    # we can make some errors more descriptive with this method
                    raise route_method_exception(e, self, args, kwargs)

            else:  # means that there is a wrapper to remove

                try:
                    # Replace all torch tensor with their child attribute
                    new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                        method_name, self, args, kwargs
                    )

                except BaseException as e:  # if there's a type mismatch, try to fix it!

                    try:
                        # if the first argument has no child (meaning it's probably raw data),
                        # try wrapping it with the type of self. We have to exclude PointerTensor
                        # because otherwise it can lead to inadvertently sending data to another
                        # machine
                        if not hasattr(args[0], "child") and not isinstance(
                            self.child, PointerTensor
                        ):
                            # TODO: add check to make sure this isn't getting around a security class

                            _args = list()
                            _args.append(type(self)().on(args[0], wrap=False))
                            for a in args[1:]:
                                _args.append(a)

                            args = _args

                        # Replace all torch tensor with their child attribute
                        new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                            method_name, self, args, kwargs
                        )
                    except BaseException as e:
                        # we can make some errors more descriptive with this method
                        raise route_method_exception(e, self, args, kwargs)

                # Send the new command to the appropriate class and get the response
                method = getattr(new_self, method_name)
                response = method(*new_args, **new_kwargs)

                # For inplace methods, just directly return self
                if syft.framework.is_inplace_method(method_name):
                    return self

                # if object is a pointer of pointer, set register to False
                if isinstance(self.child, PointerTensor):
                    wrap_args = {"register": False}
                else:
                    wrap_args = {}
                # Put back the wrappers where needed
                response = hook_args.hook_response(
                    method_name, response, wrap_type=type(self), new_self=self, wrap_args=wrap_args
                )

            return response
Example #23
        def overloaded_native_method(self, *args, **kwargs):
            """
            Perform the hooking
            """

            if not hasattr(self, "child"):  # means that it's not a wrapper

                # if self is a natural tensor but the first argument isn't,
                # wrap self with the appropriate type and re-run
                if len(args) > 0 and hasattr(args[0], "child"):

                    # if we allow this for PointerTensors it opens the potential
                    # that we could accidentally serialize and send a tensor in the
                    # arguments
                    if not isinstance(args[0].child, PointerTensor):
                        self = type(args[0].child)().on(self, wrap=True)
                        args = [args[0]]
                        return overloaded_native_method(self, *args, **kwargs)

                method = getattr(self, f"native_{method_name}")
                # Run the native function with the new args

                try:
                    response = method(*args, **kwargs)

                except BaseException as e:
                    # we can make some errors more descriptive with this method
                    raise route_method_exception(e, self, args, kwargs)

            else:  # means that there is a wrapper to remove

                try:
                    # Replace all torch tensor with their child attribute
                    new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                        method_name, self, args, kwargs
                    )

                except BaseException as e:  # if there's a type mismatch, try to fix it!

                    try:
                        # if the first argument has no child (meaning it's probably raw data),
                        # try wrapping it with the type of self. We have to exclude PointerTensor
                        # because otherwise it can lead to inadvertently sending data to another
                        # machine
                        if not hasattr(args[0], "child") and not isinstance(
                            self.child, PointerTensor
                        ):
                            # TODO: add check to make sure this isn't getting around
                            # a security class

                            _args = []
                            _args.append(type(self)().on(args[0], wrap=False))
                            for a in args[1:]:
                                _args.append(a)

                            args = _args
                        elif isinstance(
                            self.child, PointerTensor
                        ) and syft.framework.is_inplace_method(method_name):
                            # under very specific conditions, i.e. inplace methods taking a
                            # single argument which is a Tensor, we allow automatic sending of
                            # this tensor. This helps when reusing python code from other
                            # libraries for remote execution
                            # so clearly, this allows: pointer += tensor
                            if isinstance(args[0], FrameworkTensor) and len(args) == 1:
                                args[0].send_(self.child.location, no_wrap=True)

                        # Replace all torch tensor with their child attribute
                        new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                            method_name, self, args, kwargs
                        )
                    except BaseException as e:
                        # we can make some errors more descriptive with this method
                        raise route_method_exception(e, self, args, kwargs)

                # Send the new command to the appropriate class and get the response
                method = getattr(new_self, method_name)
                response = method(*new_args, **new_kwargs)

                # For inplace methods, just directly return self
                if syft.framework.is_inplace_method(method_name):
                    return self

                # Put back the wrappers where needed
                response = hook_args.hook_response(
                    method_name,
                    response,
                    wrap_type=type(self),
                    new_self=self,
                    wrap_args=self.get_class_attributes(),
                )

            return response