Example #1
File: role.py  Project: mybian/PySyft
    def _execute_action(self, action):
        """ Build placeholders and store action.
        """
        cmd, _self, args_, kwargs_, return_placeholder = (
            action.name,
            action.target,  # target is equivalent to the "self" in a method
            action.args,
            action.kwargs,
            action.return_ids,
        )
        _self = self._fetch_placeholders_from_ids(_self)
        args_ = self._fetch_placeholders_from_ids(args_)
        kwargs_ = self._fetch_placeholders_from_ids(kwargs_)
        return_placeholder = self._fetch_placeholders_from_ids(return_placeholder)

        if _self is None:
            method = self._fetch_package_method(cmd)
            response = method(*args_, **kwargs_)
        else:
            response = getattr(_self, cmd)(*args_, **kwargs_)

        if not isinstance(response, (tuple, list)):
            response = (response,)

        PlaceHolder.instantiate_placeholders(return_placeholder, response)
Example #2
File: tracing.py  Project: wddan1/PySyft
        def trace_wrapper(*args, **kwargs):
            cmd_name = ".".join((self.package.__name__, attr_name))
            command = (cmd_name, None, args, kwargs)

            result = package_attr(*args, **kwargs)

            if isinstance(result, FrameworkTensor):
                result = PlaceHolder.create_from(
                    result, owner=self.owner, role=self.role, tracing=True
                )
                self.role.register_action(
                    (command, result), sy.execution.computation.ComputationAction
                )
            elif isinstance(result, (list, tuple)):
                result = tuple(
                    PlaceHolder.create_from(r, owner=self.owner, role=self.role, tracing=True)
                    for r in result
                )
                self.role.register_action(
                    (command, result), sy.execution.computation.ComputationAction
                )
            else:
                self.role.register_action(
                    (command, None), sy.execution.computation.ComputationAction
                )

            return result
Example #3
    def _instantiate_inputs(self, args):
        """ Takes input arguments for this role and generates placeholders.
        """
        input_placeholders = tuple(
            self.placeholders[input_id] for input_id in self.input_placeholder_ids
        )
        PlaceHolder.instantiate_placeholders(input_placeholders, args)
Example #4
File: role.py  Project: zyedmaheen/PySyft
    def _execute_action(self, action):
        """ Build placeholders and store action.
        """
        cmd, _self, args_, kwargs_, return_values = (
            action.name,
            action.target,  # target is equivalent to the "self" in a method
            action.args,
            action.kwargs,
            action.return_ids,
        )
        _self = self._fetch_placeholders_from_ids(_self)
        args_ = self._fetch_placeholders_from_ids(args_)
        kwargs_ = self._fetch_placeholders_from_ids(kwargs_)
        return_values = self._fetch_placeholders_from_ids(return_values)

        # We can only instantiate placeholders, so filter them out
        return_placeholders = []
        Role.nested_object_traversal(return_values,
                                     lambda ph: return_placeholders.append(ph),
                                     PlaceHolder)

        if _self is None:
            method = self._fetch_package_method(cmd)
            response = method(*args_, **kwargs_)
        else:
            response = getattr(_self, cmd)(*args_, **kwargs_)

        if not isinstance(response, (tuple, list)):
            response = (response, )

        PlaceHolder.instantiate_placeholders(return_placeholders, response)
Example #5
    def _execute_action(self, action):
        """ Build placeholders and store action.
        """
        cmd, _self, args, kwargs, return_placeholder = (
            action.name,
            action.target,  # target is equivalent to the "self" in a method
            action.args,
            action.kwargs,
            action.return_ids,
        )
        _self = self._fetch_placeholders_from_ids(_self)
        args = self._fetch_placeholders_from_ids(args)
        kwargs = self._fetch_placeholders_from_ids(kwargs)
        return_placeholder = self._fetch_placeholders_from_ids(
            return_placeholder)

        if _self is None:
            response = eval(cmd)(*args, **kwargs)  # nosec
        else:
            response = getattr(_self, cmd)(*args, **kwargs)

        if isinstance(response, PlaceHolder) or isinstance(
                response, FrameworkTensor):
            response = (response, )
            PlaceHolder.instantiate_placeholders(return_placeholder, response)
Example #6
        def trace_wrapper(*args, **kwargs):
            """creates placeholders and registers ComputationAction to role"""
            cmd_name = ".".join((self.package.__name__, attr_name))
            command = (cmd_name, None, args, kwargs)

            result = package_attr(*args, **kwargs)

            if isinstance(result, PlaceHolder) or (
                isinstance(result, (list, tuple))
                and any(isinstance(r, PlaceHolder) for r in result)
            ):
                # In this case, the tracing was already done in Placeholder.handle_func_command
                return result

            if isinstance(result, FrameworkTensor):
                result = PlaceHolder.create_from(result, role=self.role, tracing=True)
                self.role.register_action(
                    (command, result), sy.execution.computation.ComputationAction
                )
            elif isinstance(result, (list, tuple)):
                result = tuple(
                    PlaceHolder.create_from(r, role=self.role, tracing=True) for r in result
                )
                self.role.register_action(
                    (command, result), sy.execution.computation.ComputationAction
                )
            else:
                self.role.register_action(
                    (command, None), sy.execution.computation.ComputationAction
                )

            return result
Example #7
        def wrap_stateful_plan(*args):
            role = plan.role
            state = args[-1]
            if 0 < len(role.state.state_placeholders) == len(state) and isinstance(
                state, (list, tuple)
            ):
                state_placeholders = tuple(
                    role.placeholders[ph.id.value] for ph in role.state.state_placeholders
                )
                PlaceHolder.instantiate_placeholders(role.state.state_placeholders, state)
                PlaceHolder.instantiate_placeholders(state_placeholders, state)

            return plan(*args[:-1])
Example #8
    def create_dummy(input_type, input_placeholder):
        if issubclass(input_type, FrameworkTensor):
            return input_type(
                PlaceHolder.create_placeholders([input_placeholder.expected_shape])[0]
            )
        else:
            return input_type()
Example #9
    def translate(self):
        translation_plan = self.plan.copy()
        translation_plan.forward = None

        args_shape = translation_plan.get_args_shape()
        args = PlaceHolder.create_placeholders(args_shape)

        # To avoid storing Plan state tensors in the torchscript, they will be sent as parameters:
        # we trace a wrapper func, which accepts the state parameters as its last arg
        # and sets them into the Plan before executing it
        def wrap_stateful_plan(*args):
            role = translation_plan.role
            state = args[-1]
            if 0 < len(role.state.state_placeholders) == len(
                    state) and isinstance(state, (list, tuple)):
                state_placeholders = tuple(
                    role.placeholders[ph.id.value]
                    for ph in role.state.state_placeholders)
                PlaceHolder.instantiate_placeholders(
                    role.state.state_placeholders, state)
                PlaceHolder.instantiate_placeholders(state_placeholders, state)

            return translation_plan(*args[:-1])

        plan_params = translation_plan.parameters()
        if len(plan_params) > 0:
            torchscript_plan = jit.trace(wrap_stateful_plan,
                                         (*args, plan_params))
        else:
            torchscript_plan = jit.trace(translation_plan, args)

        self.plan.torchscript = torchscript_plan
        return self.plan
Example #10
File: role.py  Project: yashk2000/PySyft
    def load(self, tensor):
        """Load tensors used in a protocol from the worker's local store."""
        # TODO mock for now; load will use the worker's store in future work
        if self.tracing:
            return PlaceHolder.create_from(tensor, role=self, tracing=True)
        else:
            return tensor
Example #11
    def copy(self):
        # TODO not the cleanest method ever
        placeholders = {}
        old_ids_2_new_ids = {}
        for ph in self.placeholders.values():
            copy = ph.copy()
            old_ids_2_new_ids[ph.id.value] = copy.id.value
            placeholders[copy.id.value] = copy

        new_input_placeholder_ids = tuple(
            old_ids_2_new_ids[self.placeholders[input_id].id.value]
            for input_id in self.input_placeholder_ids)
        new_output_placeholder_ids = tuple(
            old_ids_2_new_ids[self.placeholders[output_id].id.value]
            for output_id in self.output_placeholder_ids)

        state_placeholders = []
        for ph in self.state.state_placeholders:
            new_ph = PlaceHolder(id=old_ids_2_new_ids[ph.id.value],
                                 owner=self.owner).instantiate(ph.child)
            state_placeholders.append(new_ph)

        state = State(owner=self.owner, state_placeholders=state_placeholders)

        def _replace_placeholder_ids(obj):
            if isinstance(obj, (tuple, list)):
                r = [_replace_placeholder_ids(o) for o in obj]
                return type(obj)(r)
            elif isinstance(obj, dict):
                return {
                    key: _replace_placeholder_ids(value)
                    for key, value in obj.items()
                }
            elif isinstance(obj, PlaceholderId):
                return PlaceholderId(old_ids_2_new_ids[obj.value])
            else:
                return obj

        new_actions = []
        for action in self.actions:
            action_type = type(action)
            target = _replace_placeholder_ids(action.target)
            args = _replace_placeholder_ids(action.args)
            kwargs = _replace_placeholder_ids(action.kwargs)
            return_ids = _replace_placeholder_ids(action.return_ids)
            new_actions.append(
                action_type(action.name, target, args, kwargs, return_ids))

        return Role(
            state=state,
            actions=new_actions,
            placeholders=placeholders,
            input_placeholder_ids=new_input_placeholder_ids,
            output_placeholder_ids=new_output_placeholder_ids,
            id=sy.ID_PROVIDER.pop(),
            owner=self.owner,
            tags=self.tags,
            description=self.description,
        )
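A hypothetical usage sketch (assuming a built Role instance named role, as in the method above): copying yields a structurally equivalent Role whose role id and placeholder ids are freshly generated.

copied = role.copy()
assert copied.id != role.id                            # fresh id from sy.ID_PROVIDER.pop()
assert len(copied.placeholders) == len(role.placeholders)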
Example #12
    def create_dummy(input_type, input_placeholder):
        if issubclass(input_type, FrameworkTensor):
            tensors = PlaceHolder.create_placeholders(
                [input_placeholder.expected_shape],
                [input_placeholder.expected_dtype],
            )
            var = tensors[0]
            if input_type != type(var):
                var = input_type(var)
            return var
        else:
            return input_type()
Example #13
    def serialize_model_params(params):
        """Serializes list of tensors into State/protobuf."""
        model_params_state = State(state_placeholders=[
            PlaceHolder().instantiate(param) for param in params
        ])

        # make fake local worker for serialization
        worker = sy.VirtualWorker(hook=None)

        pb = protobuf.serde._bufferize(worker, model_params_state)
        serialized_state = pb.SerializeToString()

        return serialized_state
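A minimal usage sketch for the helper above; the torch import and parameter values are illustrative only.

import torch as th

params = [th.randn(3, 3), th.zeros(3)]
blob = serialize_model_params(params)
assert isinstance(blob, bytes)   # protobuf-encoded State, ready to send over the wire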
Example #14
def test_reset():
    role = Role()
    placeholder = PlaceHolder()
    target = torch.ones([1])

    action = ("get", target, (), {})

    role.register_action((action, placeholder), CommunicationAction)
    role.placeholders = {"ph_id1": PlaceHolder(), "ph_id2": PlaceHolder()}
    role.input_placeholder_ids = ("input1", "input2")
    role.output_placeholder_ids = ("output1", )

    assert len(role.actions) == 1
    assert len(role.placeholders) == 2
    assert role.input_placeholder_ids == ("input1", "input2")
    assert role.output_placeholder_ids == ("output1", )

    role.reset()

    assert len(role.actions) == 0
    assert len(role.placeholders) == 0
    assert role.input_placeholder_ids == ()
    assert role.output_placeholder_ids == ()
Example #15
    def build(self, *args):
        """Builds the protocol.

        First, run the function to be converted into a protocol in a context which
        activates tracing and records the actions in trace.logs

        Second, store the result ids temporarily to help order the output
        placeholders at return time

        Third, loop through the trace logs and replace the tensors found in the
        logged actions with PlaceHolders. Record those actions in
        protocol.actions

        Args:
            args: Input arguments to run the protocol
        """
        # Reset previous build
        self.roles = {}

        # Enable tracing
        self.toggle_tracing(True)
        self.is_building = True

        # Run once to build the protocol
        ph_args = tuple()
        for arg in args:
            arg_role = self.get_role_for_owner(arg.owner)

            ph_arg = PlaceHolder.create_from(arg, owner=arg.owner, role=arg_role, tracing=True)
            # Register inputs in role
            arg_role.register_input(ph_arg)

            ph_args += (ph_arg,)

        results = self.forward(*ph_args)

        # Disable tracing
        self.toggle_tracing(False)
        self.is_building = False

        # Register outputs in roles
        for result in results:
            if isinstance(result, PlaceHolder):
                result_role = self.get_role_for_owner(result.owner)
                result_role.register_output(result)

        self.is_built = True

        return results
Example #16
    def report(self, updated_model_params: list):
        # Calc params diff
        orig_params = self.model.tensors()
        diff_params = [orig_params[i] - updated_model_params[i] for i in range(len(orig_params))]

        # Wrap diff in State
        diff_ph = [PlaceHolder().instantiate(t) for t in diff_params]
        diff = State(state_placeholders=diff_ph)

        response = self.grid_client.report(
            worker_id=self.fl_client.worker_id,
            request_key=self.cycle_params["request_key"],
            diff=diff,
        )
        return response
Example #17
File: role.py  Project: zyedmaheen/PySyft
    def copy(self):
        # TODO not the cleanest method ever
        placeholders = {}
        old_ids_2_new_ids = {}
        for ph in self.placeholders.values():
            copy = ph.copy()
            old_ids_2_new_ids[ph.id.value] = copy.id.value
            placeholders[copy.id.value] = copy

        new_input_placeholder_ids = tuple(
            old_ids_2_new_ids[self.placeholders[input_id].id.value]
            for input_id in self.input_placeholder_ids)
        new_output_placeholder_ids = tuple(
            old_ids_2_new_ids[self.placeholders[output_id].id.value]
            for output_id in self.output_placeholder_ids)

        state_placeholders = []
        for ph in self.state.state_placeholders:
            new_ph = PlaceHolder(
                id=old_ids_2_new_ids[ph.id.value]).instantiate(ph.child)
            state_placeholders.append(new_ph)

        state = State(state_placeholders)

        _replace_placeholder_ids = lambda obj: Role.nested_object_traversal(
            obj, lambda x: PlaceholderId(old_ids_2_new_ids[x.value]),
            PlaceholderId)

        new_actions = []
        for action in self.actions:
            action_type = type(action)
            target = _replace_placeholder_ids(action.target)
            args_ = _replace_placeholder_ids(action.args)
            kwargs_ = _replace_placeholder_ids(action.kwargs)
            return_ids = _replace_placeholder_ids(action.return_ids)
            new_actions.append(
                action_type(action.name, target, args_, kwargs_, return_ids))

        return Role(
            state=state,
            actions=new_actions,
            placeholders=placeholders,
            input_placeholder_ids=new_input_placeholder_ids,
            output_placeholder_ids=new_output_placeholder_ids,
            id=sy.ID_PROVIDER.pop(),
        )
Example #18
    def _build_placeholders(self, obj):
        """
        Replace all FrameworkTensors in an object with Placeholder ids.
        """
        if isinstance(obj, (tuple, list)):
            r = [self._build_placeholders(o) for o in obj]
            return type(obj)(r)
        elif isinstance(obj, dict):
            return {key: self._build_placeholders(value) for key, value in obj.items()}
        elif isinstance(obj, FrameworkTensor):
            if obj.id in self.placeholders:
                return self.placeholders[obj.id].id
            placeholder = PlaceHolder(id=obj.id, owner=self.owner)
            self.placeholders[obj.id] = placeholder
            return placeholder.id
        else:
            return obj
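The recursive walk above follows a generic nested-replacement pattern. Below is a self-contained sketch of that pattern that runs without PySyft, using plain ints in place of FrameworkTensor; all names are illustrative.

def replace_nested(obj, predicate, transform):
    """Recursively walk tuples, lists and dicts, transforming the leaves matching predicate."""
    if isinstance(obj, (tuple, list)):
        return type(obj)(replace_nested(o, predicate, transform) for o in obj)
    elif isinstance(obj, dict):
        return {k: replace_nested(v, predicate, transform) for k, v in obj.items()}
    elif predicate(obj):
        return transform(obj)
    else:
        return obj

print(replace_nested({"a": [1, (2, "x")]}, lambda o: isinstance(o, int), lambda o: o * 10))
# {'a': [10, (20, 'x')]}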
Example #19
def test_placeholder_forwarding():
    class TestClass(object):
        def child_only(self):
            return "Method 1"

        def copy(self):
            return "Method 2"  # pragma: no cover

    placeholder = PlaceHolder()
    placeholder.instantiate(TestClass())

    # Should be forwarded to the child
    assert placeholder.child_only() == "Method 1"

    # Should be found in placeholder -- should not be forwarded
    assert placeholder.copy() != "Method 2"

    # Not found in placeholder or child
    with pytest.raises(AttributeError):
        placeholder.dummy_method()
Example #20
    def translate(self):
        plan = self.plan

        args_shape = plan.get_args_shape()
        args = PlaceHolder.create_placeholders(args_shape)

        # Temporarily remove reference to original function
        tmp_forward = plan.forward
        plan.forward = None

        # To avoid storing Plan state tensors in the torchscript, they will be sent as parameters
        plan_params = plan.parameters()
        if len(plan_params) > 0:
            args = (*args, plan_params)
        torchscript_plan = jit.trace(plan, args)
        plan.torchscript = torchscript_plan
        plan.forward = tmp_forward

        return plan
Example #21
def test_register_computation_action():
    role = Role()
    placeholder = PlaceHolder()
    target = torch.ones([1])

    action = ("__add__", target, (), {})

    role.register_action((action, placeholder), ComputationAction)

    assert len(role.actions) == 1

    registered = role.actions[0]

    assert isinstance(registered, ComputationAction)
    assert registered.name == "__add__"
    assert registered.target == target
    assert registered.args == ()
    assert registered.kwargs == {}
    assert registered.return_ids == (placeholder.id, )
Example #22
File: threepio.py  Project: znbdata/PySyft
    def translate_multi_action(self, translated_cmds: List[Command],
                               action: ComputationAction, role: Role):
        cmd_config = translated_cmds.pop(0)
        store = {}
        actions = []
        for cmd in translated_cmds:
            # Create local store of placeholders
            if cmd.placeholder_output is not None:
                store[cmd.placeholder_output] = PlaceHolder(role=role)

            for i, arg in enumerate(cmd.args):
                if type(arg) == pythreepio.command.Placeholder:
                    # Replace any threepio placeholders w/ pysyft placeholders
                    cmd.args[i] = store.get(arg.key, None)

            # Create the action in the format needed by the role's register_action method
            role_action = (
                (".".join(cmd.attrs), None, tuple(cmd.args), cmd.kwargs),
                store[cmd.placeholder_output],
            )
            role.register_action(role_action, ComputationAction)
Example #23
    def __call__(self, protocol_function):
        protocol = Protocol(
            name=protocol_function.__name__,
            forward_func=protocol_function,
            id=sy.ID_PROVIDER.pop(),
            owner=sy.local_worker,
        )

        # Build the protocol automatically
        if self.args_shape:
            args_ = PlaceHolder.create_placeholders(self.args_shape)
            try:
                protocol.build(*args_)
            except TypeError as e:
                raise ValueError(
                    "Automatic build using @func2protocol failed!\nCheck that:\n"
                    " - you have provided the correct number of shapes in args_shape\n"
                    " - you have no simple numbers like int or float as args. If you do "
                    "so, please consider using a tensor instead."
                )
        return protocol
Example #24
    def __call__(self, plan_function):
        plan = Plan(
            name=plan_function.__name__,
            include_state=self.include_state,
            forward_func=plan_function,
            state_tensors=self.state_tensors,
            id=sy.ID_PROVIDER.pop(),
            owner=sy.local_worker,
        )

        # Build the plan automatically
        if self.args_shape:
            args_ = PlaceHolder.create_placeholders(self.args_shape)
            try:
                plan.build(*args_, trace_autograd=self.trace_autograd)
            except TypeError as e:
                raise ValueError(
                    "Automatic build using @func2plan failed!\nCheck that:\n"
                    " - you have provided the correct number of shapes in args_shape\n"
                    " - you have no simple numbers like int or float as args. If you do "
                    "so, please consider using a tensor instead.")
        return plan
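For reference, Example #26 below exercises this decorator end to end; a minimal usage sketch looks like this (shapes and tensor values are illustrative).

import torch as th
import syft as sy

hook = sy.TorchHook(th)  # hook torch so plan tracing works

@sy.func2plan(args_shape=[(1,)])
def double(x):
    return x * 2

assert double.is_built
assert (double(th.tensor([3.0])) == th.tensor([6.0])).all()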
Example #25
    def _instantiate_inputs(self, args_):
        """ Takes input arguments for this role and generate placeholders.
        """
        input_placeholders = tuple(self.placeholders[input_id]
                                   for input_id in self.input_placeholder_ids)
        PlaceHolder.instantiate_placeholders(input_placeholders, args_)

        # Last extra argument is a state?
        if (len(self.state.state_placeholders) > 0
                and len(args_) - len(input_placeholders) == 1
                and isinstance(args_[-1], (list, tuple))
                and len(args_[-1]) == len(self.state.state_placeholders)):
            state_placeholders = tuple(self.placeholders[ph.id.value]
                                       for ph in self.state.state_placeholders)
            PlaceHolder.instantiate_placeholders(self.state.state_placeholders,
                                                 args_[-1])
            PlaceHolder.instantiate_placeholders(state_placeholders, args_[-1])
Example #26
def test_plan_can_be_jit_traced(hook, workers):
    args_shape = [(1, )]

    @sy.func2plan(args_shape=args_shape, state=(th.tensor([1.0]), ))
    def foo(x, state):
        (bias, ) = state.read()
        x = x * 2
        return x + bias

    assert isinstance(foo.__str__(), str)
    assert len(foo.actions) > 0
    assert foo.is_built

    t = th.tensor([1.0, 2])
    x = foo(t)

    assert (x == th.tensor([3.0, 5])).all()

    args = PlaceHolder.create_placeholders(args_shape)
    torchscript_plan = th.jit.trace(foo, args)

    y = torchscript_plan(t)

    assert (y == th.tensor([3.0, 5])).all()
Example #27
    def translate(self):
        plan = self.plan

        args_shape = plan.get_args_shape()
        args = PlaceHolder.create_placeholders(args_shape)

        # Temporarily remove reference to original function
        tmp_forward = plan.forward
        plan.forward = None

        # To avoid storing Plan state tensors inside the torchscript,
        # we trace a wrapper func, which accepts the state parameters as its last arg
        # and sets them into the Plan before executing it
        def wrap_stateful_plan(*args):
            role = plan.role
            state = args[-1]
            if 0 < len(role.state.state_placeholders) == len(state) and isinstance(
                state, (list, tuple)
            ):
                state_placeholders = tuple(
                    role.placeholders[ph.id.value] for ph in role.state.state_placeholders
                )
                PlaceHolder.instantiate_placeholders(role.state.state_placeholders, state)
                PlaceHolder.instantiate_placeholders(state_placeholders, state)

            return plan(*args[:-1])

        plan_params = plan.parameters()
        if len(plan_params) > 0:
            torchscript_plan = jit.trace(wrap_stateful_plan, (*args, plan_params))
        else:
            torchscript_plan = jit.trace(plan, args)
        plan.torchscript = torchscript_plan
        plan.forward = tmp_forward

        return plan
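A hypothetical follow-up sketch (assuming a stateful plan already translated by the method above, with a single input): since wrap_stateful_plan appends the state as the last argument, the traced module is called the same way.

import torch as th

x = th.zeros(plan.get_args_shape()[0])         # dummy input matching the traced shape
out = plan.torchscript(x, plan.parameters())   # state tensors passed as the final argument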
Example #28
    def build(self, *args, trace_autograd=False):
        """Builds the plan.

        First, run the function to be converted into a plan in a context which
        activates tracing and records the actions in trace.logs

        Second, store the result ids temporarily to help order the output
        placeholders at return time

        Third, loop through the trace logs and replace the tensors found in the
        logged actions with PlaceHolders. Record those actions in
        plan.actions

        Args:
            args: Input arguments to run the plan
        """
        # Reset previous build
        self.role.reset()

        def build_nested_arg(arg, leaf_function):
            if isinstance(arg, list):
                return [build_nested_arg(obj, leaf_function) for obj in arg]
            elif isinstance(arg, tuple):
                return tuple(
                    [build_nested_arg(obj, leaf_function) for obj in arg])
            elif isinstance(arg, dict):
                return {
                    k: build_nested_arg(v, leaf_function)
                    for k, v in arg.items()
                }
            else:
                return leaf_function(arg)

        # Enable tracing
        self.toggle_tracing(True)
        self.is_building = True

        # Record the input types (used for typechecking)
        self.input_types = NestedTypeWrapper(args)

        # Run once to build the plan
        if trace_autograd:
            # Wrap arguments that require gradients with AutogradTensor,
            # to be able to trace autograd operations
            args = build_nested_arg(
                args,
                lambda x: AutogradTensor().on(x, wrap=False)
                if isinstance(x, FrameworkTensor) and x.requires_grad else x,
            )
            # Add Placeholder after AutogradTensor in the chain
            # so that all operations that happen inside AutogradTensor are recorded by Placeholder
            args_placeholders = build_nested_arg(
                args,
                lambda x: PlaceHolder.insert(
                    x, AutogradTensor, role=self.role, tracing=True),
            )
        else:
            # Add Placeholder on top of each arg
            args = args_placeholders = build_nested_arg(
                args,
                lambda x: PlaceHolder.create_from(
                    x, role=self.role, tracing=True),
            )

        # Add state to args if needed
        if self.include_state:
            args += (self.state, )

        # Check the plan arguments to see what framework wrappers we might need to send to the plan
        framework_kwargs = {}

        forward_args = inspect.getfullargspec(self.forward).args
        for f_name, wrap_framework_func in Plan._wrapped_frameworks.items():
            if f_name in forward_args:
                framework_kwargs[f_name] = wrap_framework_func(
                    self.role, self.owner)

        results = self.forward(*args, **framework_kwargs)

        # Disable tracing
        self.toggle_tracing(False)
        self.is_building = False

        # Register inputs in role
        self.role.register_inputs(args_placeholders)

        # Register outputs in role
        if isinstance(results, (tuple, list)):
            results_placeholders = tuple(
                PlaceHolder.extract(result) for result in results)
        else:
            results_placeholders = PlaceHolder.extract(results)
        self.role.register_outputs(results_placeholders)

        self.is_built = True

        # Build registered translations
        for translator in Plan._build_translators:
            try:
                self.add_translation(translator)
                self.translations.append(translator)
            except:
                warnings.warn(
                    f"Failed to translate Plan with {translator.__name__}")

        return results
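A minimal usage sketch (assuming a Plan instance my_plan whose forward takes one tensor; names are illustrative): building once with sample-shaped data records the traced actions and marks the plan as built.

import torch as th

sample = th.zeros(1)
my_plan.build(sample)
assert my_plan.is_built
assert len(my_plan.actions) > 0   # same checks as in Example #26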
Example #29
def test_create_from():
    t = torch.tensor([1, 2, 3])
    ph = PlaceHolder.create_from(t, owner=sy.local_worker)

    assert isinstance(ph, PlaceHolder)
    assert (ph.child == torch.tensor([1, 2, 3])).all()
Example #30
    def build(self, *args, trace_autograd=False):
        """Builds the plan.

        First, run the function to be converted into a plan in a context which
        activates tracing and records the actions in trace.logs

        Second, store the result ids temporarily to help order the output
        placeholders at return time

        Third, loop through the trace logs and replace the tensors found in the
        logged actions with PlaceHolders. Record those actions in
        plan.actions

        Args:
            args: Input arguments to run the plan
        """

        # Enable tracing
        self.toggle_tracing(True)
        self.is_building = True

        if trace_autograd:
            # Wrap arguments that require gradients with AutogradTensor,
            # to be able to trace autograd operations
            args = tuple(
                AutogradTensor().on(arg, wrap=False) if
                isinstance(arg, FrameworkTensor) and arg.requires_grad else arg
                for arg in args)
            # Add Placeholder after AutogradTensor in the chain
            # so that all operations that happen inside AutogradTensor are recorded by Placeholder
            args_placeholders = tuple(
                PlaceHolder.insert(arg,
                                   AutogradTensor,
                                   owner=sy.local_worker,
                                   role=self.role,
                                   tracing=True) for arg in args)
        else:
            # Add Placeholder on top of each arg
            args = args_placeholders = tuple(
                PlaceHolder.create_from(
                    arg, owner=sy.local_worker, role=self.role, tracing=True)
                for arg in args)

        # Add state to args if needed
        if self.include_state:
            args += (self.state, )

        with trace(framework_packages["torch"], self.role,
                   self.owner) as wrapped_torch:
            # Look for framework kwargs
            framework_kwargs = {}
            forward_args = inspect.getfullargspec(self.forward).args
            if "torch" in forward_args:
                framework_kwargs["torch"] = wrapped_torch

            results = self.forward(*args, **framework_kwargs)

        # Disable tracing
        self.toggle_tracing(False)
        self.is_building = False

        # Register inputs in role
        self.role.register_inputs(args_placeholders)

        # Register outputs in role
        if isinstance(results, (tuple, list)):
            results_placeholders = tuple(
                PlaceHolder.extract(result) for result in results)
        else:
            results_placeholders = PlaceHolder.extract(results)
        self.role.register_outputs(results_placeholders)

        self.is_built = True

        # Build registered translations
        for translator in Plan._build_translators:
            try:
                self.add_translation(translator)
                self.translations.append(translator)
            except:
                warnings.warn(
                    f"Failed to translate Plan with {translator.__name__}")

        return results