Exemple #1
0
def test_isub():
    """Verify that in-place subtraction (-=) works between AutogradTensors."""

    minuend = AutogradTensor(torch.Tensor([1, 2, 3]))
    subtrahend = AutogradTensor(torch.Tensor([4, 5, 6]))
    expected = AutogradTensor(torch.Tensor([-3, -3, -3]))

    minuend -= subtrahend

    assert minuend is not None
    assert torch.equal(minuend, expected)
Exemple #2
0
def test_iadd():
    """Verify that in-place addition (+=) works between AutogradTensors."""

    accumulator = AutogradTensor(torch.Tensor([1, 2, 3]))
    addend = AutogradTensor(torch.Tensor([4, 5, 6]))
    expected = AutogradTensor(torch.Tensor([5, 7, 9]))

    accumulator += addend

    assert accumulator is not None
    assert torch.equal(accumulator, expected)
Exemple #3
0
def test_wrap():
    """Check that AutogradTensor.on() builds the expected wrapper chain:
    torch.Tensor -> AutogradTensor -> torch.Tensor."""

    plain_tensor = torch.Tensor([1, 2, 3])
    wrapped = AutogradTensor().on(plain_tensor)

    # Outermost layer is a torch.Tensor wrapper
    assert isinstance(wrapped, torch.Tensor)
    # Its child is the AutogradTensor itself
    assert isinstance(wrapped.child, AutogradTensor)
    # And the AutogradTensor holds the raw torch.Tensor
    assert isinstance(wrapped.child.child, torch.Tensor)
Exemple #4
0
    def build(self, *args, trace_autograd=False):
        """Builds the plan.

        First, run the function to be converted in a plan in a context which
        activates the tracing and record the actions in trace.logs

        Second, store the result ids temporarily to helper ordering the output
        placeholders at return time

        Third, loop through the trace logs and replace the tensors found in the
        actions logged by PlaceHolders. Record those actions in
        plan.actions

        Args:
            args: Input arguments to run the plan
            trace_autograd: If True, wrap tensor arguments that require
                gradients with AutogradTensor so that autograd operations
                are traced as well.

        Returns:
            The result of running self.forward on the placeholder arguments.
        """
        # Reset previous build
        self.role.reset()

        def build_nested_arg(arg, leaf_function):
            """Apply leaf_function to every leaf of a nested list/tuple/dict
            structure, preserving the container shape."""
            if isinstance(arg, list):
                return [build_nested_arg(obj, leaf_function) for obj in arg]
            elif isinstance(arg, tuple):
                return tuple(build_nested_arg(obj, leaf_function) for obj in arg)
            elif isinstance(arg, dict):
                return {
                    k: build_nested_arg(v, leaf_function)
                    for k, v in arg.items()
                }
            else:
                return leaf_function(arg)

        # Enable tracing
        self.toggle_tracing(True)
        self.is_building = True

        # Record the argument types so calls can be typechecked later
        self.input_types = NestedTypeWrapper(args)

        # Run once to build the plan
        if trace_autograd:
            # Wrap arguments that require gradients with AutogradTensor,
            # to be able to trace autograd operations
            args = build_nested_arg(
                args,
                lambda x: AutogradTensor().on(x, wrap=False)
                if isinstance(x, FrameworkTensor) and x.requires_grad else x,
            )
            # Add Placeholder after AutogradTensor in the chain
            # so that all operations that happen inside AutogradTensor are recorded by Placeholder
            args_placeholders = build_nested_arg(
                args,
                lambda x: PlaceHolder.insert(
                    x, AutogradTensor, role=self.role, tracing=True),
            )
        else:
            # Add Placeholder on top of each arg
            args = args_placeholders = build_nested_arg(
                args,
                lambda x: PlaceHolder.create_from(
                    x, role=self.role, tracing=True),
            )

        # Add state to args if needed
        if self.include_state:
            args += (self.state, )

        # Check the plan arguments to see what framework wrappers we might need to send to the plan
        framework_kwargs = {}

        forward_args = inspect.getfullargspec(self.forward).args
        for f_name, wrap_framework_func in Plan._wrapped_frameworks.items():
            if f_name in forward_args:
                framework_kwargs[f_name] = wrap_framework_func(
                    self.role, self.owner)

        results = self.forward(*args, **framework_kwargs)

        # Disable tracing
        self.toggle_tracing(False)
        self.is_building = False

        # Register inputs in role
        self.role.register_inputs(args_placeholders)

        # Register outputs in role
        if isinstance(results, (tuple, list)):
            results_placeholders = tuple(
                PlaceHolder.extract(result) for result in results)
        else:
            results_placeholders = PlaceHolder.extract(results)
        self.role.register_outputs(results_placeholders)

        self.is_built = True

        # Build registered translations (best effort: a failing translator
        # must not abort the build).
        for translator in Plan._build_translators:
            try:
                self.add_translation(translator)
                self.translations.append(translator)
            # Fixed: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Catch Exception instead.
            except Exception:
                warnings.warn(
                    f"Failed to translate Plan with {translator.__name__}")

        return results
Exemple #5
0
    def build(self, *args, trace_autograd=False):
        """Builds the plan.

        First, run the function to be converted in a plan in a context which
        activates the tracing and record the actions in trace.logs

        Second, store the result ids temporarily to helper ordering the output
        placeholders at return time

        Third, loop through the trace logs and replace the tensors found in the
        actions logged by PlaceHolders. Record those actions in
        plan.actions

        Args:
            args: Input arguments to run the plan
            trace_autograd: If True, wrap tensor arguments that require
                gradients with AutogradTensor so that autograd operations
                are traced as well.

        Returns:
            The result of running self.forward on the placeholder arguments.
        """

        # Enable tracing
        self.toggle_tracing(True)
        self.is_building = True

        if trace_autograd:
            # Wrap arguments that require gradients with AutogradTensor,
            # to be able to trace autograd operations
            args = tuple(
                AutogradTensor().on(arg, wrap=False) if
                isinstance(arg, FrameworkTensor) and arg.requires_grad else arg
                for arg in args)
            # Add Placeholder after AutogradTensor in the chain
            # so that all operations that happen inside AutogradTensor are recorded by Placeholder
            args_placeholders = tuple(
                PlaceHolder.insert(arg,
                                   AutogradTensor,
                                   owner=sy.local_worker,
                                   role=self.role,
                                   tracing=True) for arg in args)
        else:
            # Add Placeholder on top of each arg
            args = args_placeholders = tuple(
                PlaceHolder.create_from(
                    arg, owner=sy.local_worker, role=self.role, tracing=True)
                for arg in args)

        # Add state to args if needed
        if self.include_state:
            args += (self.state, )

        # Trace torch calls made inside forward so they are recorded in the role
        with trace(framework_packages["torch"], self.role,
                   self.owner) as wrapped_torch:
            # Look for framework kwargs
            framework_kwargs = {}
            forward_args = inspect.getfullargspec(self.forward).args
            if "torch" in forward_args:
                framework_kwargs["torch"] = wrapped_torch

            results = self.forward(*args, **framework_kwargs)

        # Disable tracing
        self.toggle_tracing(False)
        self.is_building = False

        # Register inputs in role
        self.role.register_inputs(args_placeholders)

        # Register outputs in role
        if isinstance(results, (tuple, list)):
            results_placeholders = tuple(
                PlaceHolder.extract(result) for result in results)
        else:
            results_placeholders = PlaceHolder.extract(results)
        self.role.register_outputs(results_placeholders)

        self.is_built = True

        # Build registered translations (best effort: a failing translator
        # must not abort the build).
        for translator in Plan._build_translators:
            try:
                self.add_translation(translator)
                self.translations.append(translator)
            # Fixed: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Catch Exception instead.
            except Exception:
                warnings.warn(
                    f"Failed to translate Plan with {translator.__name__}")

        return results
Exemple #6
0
    lambda i: i.child if hasattr(i, "child") else
    (_ for _ in ()).throw(PureFrameworkTensorFoundError),
    AutogradTensor:
    get_child,
    LoggingTensor:
    get_child,
    PaillierTensor:
    get_child,
}

# Maps each tensor type to a callable that re-wraps a raw child value back
# into that layer of the tensor chain (inverse of forward_func).
backward_func = {
    TorchTensor: lambda child, **kwargs: child.wrap(**kwargs),
    torch.Tensor: lambda child, **kwargs: child.wrap(**kwargs),
    torch.nn.Parameter: lambda child, **kwargs: torch.nn.Parameter(data=child),
    AutogradTensor: lambda child, **kwargs: AutogradTensor(data=child).on(
        child, wrap=False
    ),
    LoggingTensor: lambda child, **kwargs: LoggingTensor().on(child, wrap=False),
    PaillierTensor: lambda child, **kwargs: PaillierTensor().on(child, wrap=False),
}

# Register chain rules for crypten's MPCTensor when crypten is installed.
if dependency_check.crypten_available:
    import crypten

    type_rule[crypten.mpc.MPCTensor] = one
    # Fixed: the fallback previously read `().throw(PureFrameworkTensorFoundError)`,
    # but a tuple has no `throw` method, so a missing `child` attribute raised
    # AttributeError instead of PureFrameworkTensorFoundError. Use the
    # generator-throw idiom used by every other forward_func entry.
    forward_func[crypten.mpc.MPCTensor] = (
        lambda i: i.child
        if hasattr(i, "child")
        else (_ for _ in ()).throw(PureFrameworkTensorFoundError)
    )
    backward_func[crypten.mpc.MPCTensor] = lambda i, **kwargs: i.wrap(**kwargs)

# Methods or functions whose signature changes a lot and that we don't want to "cache", because
# they have an arbitrary number of tensors in args which can trigger unexpected behaviour
Exemple #7
0
# Maps each tensor type to a callable that extracts the underlying child
# tensor from one layer of the tensor chain. The generator-throw idiom
# `(_ for _ in ()).throw(...)` raises PureFrameworkTensorFoundError from
# inside a lambda when the tensor has no `child` attribute.
forward_func = {
    torch.Tensor: (
        lambda tensor: tensor.child
        if hasattr(tensor, "child")
        else (_ for _ in ()).throw(PureFrameworkTensorFoundError)
    ),
    torch.nn.Parameter: (
        lambda tensor: tensor.child
        if hasattr(tensor, "child")
        else (_ for _ in ()).throw(PureFrameworkTensorFoundError)
    ),
    AutogradTensor: get_child,
    LoggingTensor: get_child,
}

# Maps each tensor type to a callable that re-wraps a raw child value back
# into that layer of the tensor chain (inverse of forward_func).
backward_func = {
    TorchTensor: lambda child: child.wrap(),
    torch.Tensor: lambda child: child.wrap(),
    torch.nn.Parameter: lambda child: torch.nn.Parameter(data=child),
    AutogradTensor: lambda child: AutogradTensor(data=child).on(child, wrap=False),
    LoggingTensor: lambda child: LoggingTensor().on(child, wrap=False),
}

# Methods whose signatures vary so much that caching them could trigger
# unexpected behaviour; listed alphabetically (set order is irrelevant).
ambiguous_methods = {
    "__getitem__",
    "__setitem__",
    "_getitem_public",
    "add_",
    "chunk",
    "new",
    "permute",
    "sub_",
    "view",
}