Example #1
    def __add__(self, *args, **kwargs):
        """
        Here is the version of the add method without the decorator: as you can see
        it is much more complicated. However you might sometimes need to specify
        some particular behaviour: so here is what to start from :)
        """

        if isinstance(args[0], th.Tensor):
            data = self.child + args[0].numpy()
            obj = PaillierTensor()
            obj.child = data
            return obj

        if isinstance(self.child, th.Tensor):
            self.child = self.child.numpy()

        # Replace all syft tensor with their child attribute
        new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
            "__add__", self, args, kwargs)

        # Send it to the appropriate class and get the response
        response = getattr(new_self, "__add__")(*new_args, **new_kwargs)

        # Put back SyftTensor on the tensors found in the response
        response = hook_args.hook_response("__add__",
                                           response,
                                           wrap_type=type(self))
        return response
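Every snippet in this collection follows the same three-step pattern: unwrap the Syft tensors to their child attribute, forward the call to the unwrapped objects, and re-wrap the result. A minimal sketch of that shared skeleton, offered as an illustration only (the import path and the placeholder function name are assumptions):

    # A minimal sketch of the shared skeleton, not tied to any particular tensor type.
    from syft.generic.frameworks.hook import hook_args  # import path is an assumption

    def forward_method(self, method_name, *args, **kwargs):
        # 1. Replace all Syft tensors with their child attribute
        new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
            method_name, self, args, kwargs
        )

        # 2. Send the call to the appropriate (unwrapped) class and get the response
        response = getattr(new_self, method_name)(*new_args, **new_kwargs)

        # 3. Put back a Syft wrapper of the same type on the response
        return hook_args.hook_response(method_name, response, wrap_type=type(self))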
Example #2
            def method_with_grad(*args, **kwargs):
                new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                    name, self, args, kwargs
                )

                result = getattr(new_self, name)(*new_args, **new_kwargs)

                # Put back SyftTensor on the tensors found in the response
                result = hook_args.hook_response(name, result, wrap_type=type(self))
                result.grad_fn = grad_fn(self, *args, **kwargs)

                return result
Example #3
        def overloaded_native_method(self, *args, **kwargs):
            """
            Operate the hooking
            """
            if not hasattr(self, "child"):  # means that it's not a wrapper
                method = getattr(self, f"native_{method_name}")
                # Run the native function with the new args

                try:
                    response = method(*args, **kwargs)
                except BaseException as e:
                    # we can make some errors more descriptive with this method
                    raise route_method_exception(e, self, args, kwargs)

            else:  # means that there is a wrapper to remove
                try:
                    # Replace all torch tensor with their child attribute
                    new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                        method_name, self, args, kwargs
                    )
                except BaseException as e:
                    # we can make some errors more descriptive with this method
                    raise route_method_exception(e, self, args, kwargs)

                # Send the new command to the appropriate class and get the response
                method = getattr(new_self, method_name)
                response = method(*new_args, **new_kwargs)

                response.parents = (self.id, new_self.id)

                # For inplace methods, just directly return self
                if syft.framework.is_inplace_method(method_name):
                    return self

                # Put back the wrappers where needed
                response = hook_args.hook_response(
                    method_name,
                    response,
                    wrap_type=type(self),
                    new_self=self,
                    wrap_args=self.get_class_attributes(),
                )
                if args:
                    response.parents = (self, args[0])
                else:
                    response.parents = self
                response.command = method_name
            return response
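The hasattr(self, "child") test above is how the hook tells a plain framework tensor from a wrapper: in PySyft a wrapper is typically a torch.Tensor whose child attribute holds a Syft tensor. A rough illustration of that check (the stand-in child value below is purely for demonstration):

    import torch

    plain = torch.tensor([1.0, 2.0])
    print(hasattr(plain, "child"))    # False -> run the native_<method_name> branch

    wrapper = torch.tensor([1.0, 2.0])
    wrapper.child = object()          # stand-in for a Syft tensor such as a PointerTensor
    print(hasattr(wrapper, "child"))  # True -> unwrap, dispatch, re-wrap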
Example #4
    def matmul(self, *args, **kwargs):
        """
        Manually hook matmul to add the truncation step, which is inherent to
        multiplication in the fixed-precision setting
        """

        other = args[0]

        if isinstance(other, FixedPrecisionTensor):
            assert (
                self.precision_fractional == other.precision_fractional
            ), "In matmul, all args should have the same precision_fractional"

        if isinstance(self.child, AdditiveSharingTensor) and isinstance(
                other.child, torch.Tensor):
            # If we try to matmul a FPT>(wrap)>AST with a FPT>torch.tensor,
            # we want to perform AST @ torch.tensor
            new_self = self.child
            new_args = (other, )
            new_kwargs = kwargs

        elif isinstance(other.child, AdditiveSharingTensor) and isinstance(
                self.child, torch.Tensor):
            # If we try to matmul a FPT>torch.tensor with a FPT>(wrap)>AST,
            # we swap operators so that we do the same operation as above
            new_self = other.child
            new_args = (self, )
            new_kwargs = kwargs

        else:
            # Replace all syft tensor with their child attribute
            new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                "matmul", self, args, kwargs)

        # Send it to the appropriate class and get the response
        response = getattr(new_self, "matmul")(*new_args, **new_kwargs)

        # Put back SyftTensor on the tensors found in the response
        response = hook_args.hook_response(
            "matmul",
            response,
            wrap_type=type(self),
            wrap_args=self.get_class_attributes())

        response %= self.field  # Wrap around the field
        response = response.truncate(other.precision_fractional)

        return response
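The truncation step at the end exists because multiplying two fixed-precision values multiplies their scale factors: with base b and precision_fractional f, each operand carries a factor b**f, so the raw product carries b**(2f) and must be divided by b**f once. A small numeric sketch of that bookkeeping with plain integers (no Syft types, field wrap-around ignored):

    base, prec = 10, 3
    scale = base ** prec

    # Encode 2.5 and 4.0 as fixed-precision integers
    a, b = int(2.5 * scale), int(4.0 * scale)   # 2500, 4000

    raw = a * b                                  # 10_000_000, carries scale**2
    truncated = raw // scale                     # 10_000, back to a single scale factor
    print(truncated / scale)                     # 10.0 == 2.5 * 4.0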
Example #5
        def _hook_method_args(self, *args, **kwargs):
            # Replace all syft tensor with their child attribute
            new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                attr.__name__, self, args, kwargs)

            # Send it to the appropriate class and get the response
            response = attr(self, new_self, *new_args, **new_kwargs)

            # Put back SyftTensor on the tensors found in the response
            response = hook_args.hook_response(
                attr.__name__,
                response,
                wrap_type=type(self),
                wrap_args=self.get_class_attributes())

            return response
Example #6
        def overloaded_syft_method(self, *args, **kwargs):
            """
            Operate the hooking
            """
            # Replace all syft tensor with their child attribute
            new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                attr, self, args, kwargs
            )

            # Send it to the appropriate class and get the response
            response = getattr(new_self, attr)(*new_args, **new_kwargs)

            # Put back SyftTensor on the tensors found in the response
            response = hook_args.hook_response(
                attr, response, wrap_type=type(self), wrap_args=self.get_class_attributes()
            )

            return response
Example #7
    def manual_add(self, *args, **kwargs):
        """
        Here is the version of the add method without the decorator: as you can see
        it is much more complicated. However you might sometimes need to specify
        some particular behaviour: so here is what to start from :)
        """
        # Replace all syft tensor with their child attribute
        new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
            "add", self, args, kwargs)

        print("Log method manual_add")
        # Send it to the appropriate class and get the response
        response = getattr(new_self, "add")(*new_args, **new_kwargs)

        # Put back SyftTensor on the tensors found in the response
        response = hook_args.hook_response("add",
                                           response,
                                           wrap_type=type(self))
        return response
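For comparison, here is a sketch of what the decorated counterpart of manual_add might look like. It assumes PySyft's overloaded.method-style decorator, which hands the method the already-unwrapped child (_self) and performs the unwrap/re-wrap steps itself; the decorator name and this behaviour are assumptions about the surrounding codebase:

    @overloaded.method
    def add(self, _self, *args, **kwargs):
        """
        Decorated counterpart (a sketch): _self is the already-unwrapped child,
        and the decorator takes care of unwrap_args_from_method / hook_response.
        """
        print("Log method add")
        return getattr(_self, "add")(*args, **kwargs)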
Example #8
        def overloaded_attr(self, *args, **kwargs):
            """
            Operate the hooking
            """

            # Replace all syft tensor with their child attribute
            new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                attr, self, args, kwargs
            )

            results = {}
            for k, v in new_self.items():
                results[k] = v.__getattribute__(attr)(*dispatch(new_args, k), **new_kwargs)

            # Put back MultiPointerTensor on the tensors found in the response
            response = hook_args.hook_response(
                attr, results, wrap_type=MultiPointerTensor, wrap_args=self.get_class_attributes()
            )

            return response
Example #9
    def mul_and_div(self, other, cmd):
        """
        Manually hook mul and div to add the truncation/rescaling step, which is
        inherent to these operations in the fixed-precision setting
        """
        changed_sign = False
        if isinstance(other, FixedPrecisionTensor):
            assert (
                self.precision_fractional == other.precision_fractional
            ), "In mul and div, all args should have the same precision_fractional"
            assert self.base == other.base, "In mul and div, all args should have the same base"

        if isinstance(other, (int, torch.Tensor, AdditiveSharingTensor)):
            new_self = self.child
            new_other = other
        elif isinstance(other, float):
            raise NotImplementedError(
                "Can't multiply or divide a FixedPrecisionTensor with a float value"
            )

        elif isinstance(
                self.child,
            (AdditiveSharingTensor, MultiPointerTensor)) and isinstance(
                other.child, torch.Tensor):
            # If operands are FPT>AST and FPT>torch.tensor,
            # we want to perform the operation on AST and torch.tensor
            if cmd == "mul":
                new_self = self.child
            elif cmd == "div":
                new_self = self.child * self.base**self.precision_fractional
            new_other = other

        elif isinstance(
                other.child,
            (AdditiveSharingTensor, MultiPointerTensor)) and isinstance(
                self.child, torch.Tensor):
            # If operands are FPT>torch.tensor and FPT>AST,
            # we swap operators so that we do the same operation as above
            if cmd == "mul":
                new_self = other.child
                new_other = self
            elif cmd == "div":
                # TODO how to divide by AST?
                raise NotImplementedError(
                    "Division of a FixedPrecisionTensor by an AdditiveSharingTensor not implemented"
                )

        elif (cmd == "mul"
              and isinstance(self.child,
                             (AdditiveSharingTensor, MultiPointerTensor))
              and isinstance(other.child,
                             (AdditiveSharingTensor, MultiPointerTensor))):
            # If we try to multiply a FPT>AST with another FPT>AST
            # (or FPT>MultiPointerTensor), we just unwrap both operands
            new_self, new_other, _ = hook_args.unwrap_args_from_method(
                "mul", self, other, None)

        else:
            # Replace all syft tensor with their child attribute
            new_self, new_other, _ = hook_args.unwrap_args_from_method(
                cmd, self, other, None)

            # To avoid problems with negative numbers,
            # we take the absolute value of the operands.
            # The problems could be: 1) bad truncation for multiplication,
            # 2) overflow when scaling self in division

            # sgn_self is 1 when new_self is positive else it's 0
            # The comparison is different if new_self is a torch tensor or an AST
            sgn_self = (new_self > 0).type(self.torch_dtype)
            pos_self = new_self * sgn_self
            neg_self = new_self * (sgn_self - 1)
            new_self = neg_self + pos_self

            # sgn_other is 1 when new_other is positive else it's 0
            # The comparison is different if new_other is a torch tensor or an AST
            sgn_other = (new_other > 0).type(self.torch_dtype)
            pos_other = new_other * sgn_other
            neg_other = new_other * (sgn_other - 1)
            new_other = neg_other + pos_other

            # If both have the same sign, sgn is 1 else it's 0
            # To be able to write sgn = 1 - (sgn_self - sgn_other) ** 2,
            # we would need to overload the __add__ for operators int and AST.
            sgn = -((sgn_self - sgn_other)**2) + 1
            changed_sign = True

            if cmd == "div":
                new_self *= self.base**self.precision_fractional
        # Send it to the appropriate class and get the response
        response = getattr(new_self, cmd)(new_other)
        # Put back SyftTensor on the tensors found in the response
        response = hook_args.hook_response(
            cmd,
            response,
            wrap_type=type(self),
            wrap_args=self.get_class_attributes())
        if not isinstance(other, (int, torch.Tensor, AdditiveSharingTensor)):
            if cmd == "mul":
                # If operation is mul, we need to truncate
                response = response.truncate(self.precision_fractional,
                                             check_sign=False)

            if changed_sign:
                # Give back its sign to response
                pos_res = response * sgn
                neg_res = response * (sgn - 1)
                response = neg_res + pos_res

        return response
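The absolute-value trick in the final else branch works on non-negative values to avoid bad truncation (for mul) and overflow when scaling self (for div), and records sgn = 1 - (sgn_self - sgn_other)**2 so the sign can be restored after hook_response. A small numeric sketch of that bookkeeping with plain integers (no shares, no field wrap-around):

    def signed_fixed_mul(x, y, base=10, prec=3):
        scale = base ** prec
        a, b = int(x * scale), int(y * scale)

        # sgn_* is 1 for a positive operand, else 0 (as in the snippet above)
        sgn_a, sgn_b = int(a > 0), int(b > 0)

        # work on non-negative values to keep the truncation well behaved
        abs_a = a * sgn_a + a * (sgn_a - 1)   # == abs(a)
        abs_b = b * sgn_b + b * (sgn_b - 1)   # == abs(b)

        res = (abs_a * abs_b) // scale        # multiply, then truncate one scale factor

        # sgn is 1 when the operands had the same sign, else 0
        sgn = 1 - (sgn_a - sgn_b) ** 2
        res = res * sgn + res * (sgn - 1)     # restore the sign of the result
        return res / scale

    print(signed_fixed_mul(-2.5, 4.0))        # -10.0
    print(signed_fixed_mul(-2.5, -4.0))       # 10.0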
Example #10
        def overloaded_native_method(self, *args, **kwargs):
            """
            Operate the hooking
            """

            if not hasattr(self, "child"):  # means that it's not a wrapper

                # if self is a natural tensor but the first argument isn't,
                # wrap self with the appropriate type and re-run
                if len(args) > 0 and hasattr(args[0], "child"):

                    # if we allow this for PointerTensors it opens the potential
                    # that we could accidentally serialize and send a tensor in the
                    # arguments
                    if not isinstance(args[0].child, PointerTensor):
                        self = type(args[0].child)().on(self, wrap=True)
                        args = [args[0]]
                        return overloaded_native_method(self, *args, **kwargs)

                method = getattr(self, f"native_{method_name}")
                # Run the native function with the new args

                try:
                    response = method(*args, **kwargs)

                except BaseException as e:
                    # we can make some errors more descriptive with this method
                    raise route_method_exception(e, self, args, kwargs)

            else:  # means that there is a wrapper to remove

                try:
                    # Replace all torch tensor with their child attribute
                    new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                        method_name, self, args, kwargs
                    )

                except BaseException as e:  # if there's a type mismatch, try to fix it!

                    try:
                        # if the first argument has no child (meaning it's probably raw data),
                        # try wrapping it with the type of self. We have to except PointerTensor
                        # because otherwise it can lead to inadvertently sending data to another
                        # machine
                        if not hasattr(args[0], "child") and not isinstance(
                            self.child, PointerTensor
                        ):
                            # TODO: add check to make sure this isn't getting around a security class

                            _args = list()
                            _args.append(type(self)().on(args[0], wrap=False))
                            for a in args[1:]:
                                _args.append(a)

                            args = _args

                        # Replace all torch tensor with their child attribute
                        new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                            method_name, self, args, kwargs
                        )
                    except BaseException as e:
                        # we can make some errors more descriptive with this method
                        raise route_method_exception(e, self, args, kwargs)

                # Send the new command to the appropriate class and get the response
                method = getattr(new_self, method_name)
                response = method(*new_args, **new_kwargs)

                # For inplace methods, just directly return self
                if syft.framework.is_inplace_method(method_name):
                    return self

                # if object is a pointer of pointer, set register to False
                if isinstance(self.child, PointerTensor):
                    wrap_args = {"register": False}
                else:
                    wrap_args = {}
                # Put back the wrappers where needed
                response = hook_args.hook_response(
                    method_name, response, wrap_type=type(self), new_self=self, wrap_args=wrap_args
                )

            return response
Example #11
        def overloaded_native_method(self, *args, **kwargs):
            """
            Operate the hooking
            """

            if not hasattr(self, "child"):  # means that it's not a wrapper

                # if self is a natural tensor but the first argument isn't,
                # wrap self with the appropriate type and re-run
                if len(args) > 0 and hasattr(args[0], "child"):

                    # if we allow this for PointerTensors it opens the potential
                    # that we could accidentally serialize and send a tensor in the
                    # arguments
                    if not isinstance(args[0].child, PointerTensor):
                        self = type(args[0].child)().on(self, wrap=True)
                        args = [args[0]]
                        return overloaded_native_method(self, *args, **kwargs)

                method = getattr(self, f"native_{method_name}")
                # Run the native function with the new args

                try:
                    response = method(*args, **kwargs)

                except BaseException as e:
                    # we can make some errors more descriptive with this method
                    raise route_method_exception(e, self, args, kwargs)

            else:  # means that there is a wrapper to remove

                try:
                    # Replace all torch tensor with their child attribute
                    new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                        method_name, self, args, kwargs
                    )

                except BaseException as e:  # if there's a type mismatch, try to fix it!

                    try:
                        # if the first argument has no child (meaning it's probably raw data),
                        # try wrapping it with the type of self. We have to except PointerTensor
                        # because otherwise it can lead to inadvertently sending data to another
                        # machine
                        if not hasattr(args[0], "child") and not isinstance(
                            self.child, PointerTensor
                        ):
                            # TODO: add check to make sure this isn't getting around
                            # a security class

                            _args = []
                            _args.append(type(self)().on(args[0], wrap=False))
                            for a in args[1:]:
                                _args.append(a)

                            args = _args
                        elif isinstance(
                            self.child, PointerTensor
                        ) and syft.framework.is_inplace_method(method_name):
                            # under very specific conditions, ie inplace methods containing a
                            # single argument which is a Tensor, we allow automatic sending of
                            # this tensor. This is helpful to facilitate utilizing python code
                            # of other library for remote execution
                            # so clearly, this allows: pointer += tensor
                            if isinstance(args[0], FrameworkTensor) and len(args) == 1:
                                args[0].send_(self.child.location, no_wrap=True)

                        # Replace all torch tensor with their child attribute
                        new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                            method_name, self, args, kwargs
                        )
                    except BaseException as e:
                        # we can make some errors more descriptive with this method
                        raise route_method_exception(e, self, args, kwargs)

                # Send the new command to the appropriate class and get the response
                method = getattr(new_self, method_name)
                response = method(*new_args, **new_kwargs)

                # For inplace methods, just directly return self
                if syft.framework.is_inplace_method(method_name):
                    return self

                # Put back the wrappers where needed
                response = hook_args.hook_response(
                    method_name,
                    response,
                    wrap_type=type(self),
                    new_self=self,
                    wrap_args=self.get_class_attributes(),
                )

            return response
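The elif branch that handles inplace methods on pointers is what allows code such as pointer += tensor: the single local tensor argument is sent to the pointer's location before the inplace call is dispatched. A usage sketch of that behaviour, assuming a PySyft 0.2-style setup with TorchHook and VirtualWorker (the exact surrounding version is an assumption):

    import torch
    import syft as sy

    hook = sy.TorchHook(torch)
    bob = sy.VirtualWorker(hook, id="bob")

    pointer = torch.tensor([1.0, 2.0, 3.0]).send(bob)

    # A plain local tensor as the single argument of an inplace method: the hook
    # sends it to bob (args[0].send_(self.child.location, no_wrap=True)) before
    # dispatching the inplace add remotely.
    pointer += torch.tensor([1.0, 1.0, 1.0])

    print(pointer.get())  # tensor([2., 3., 4.])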