def deser(msg_obj, worker, acquire):
    """Deserialize a _LocalTensor message: register the described tensor
    locally when acquiring, otherwise return a (possibly pre-existing)
    _PointerTensor to the remote object."""
    if 'owner' not in msg_obj:
        raise TypeError(
            "sy._LocalTensor can't deserialize a non-valid sy._LocalTensor. "
            "Do you want to call sy.FloatTensor.deser() instead?")

    if msg_obj['owner'] == worker.id:
        logging.warning('_LocalTensor sent to itself')

    if acquire:  # We need to register the info given
        syft_obj = sy._LocalTensor(child=None,
                                   parent=None,
                                   torch_type=msg_obj['torch_type'],
                                   owner=worker,
                                   id=msg_obj['id'],
                                   skip_register=True)
    else:  # We point at the info which generally we can't really have
        # Make sure we are not creating a duplicate pointer
        previous_pointer = worker.get_pointer_to(msg_obj['owner'],
                                                 msg_obj['id'])
        if previous_pointer is None:
            syft_obj = sy._PointerTensor(child=None,
                                         parent=None,
                                         torch_type=msg_obj['torch_type'],
                                         location=msg_obj['owner'],
                                         id_at_location=msg_obj['id'],
                                         owner=worker,
                                         id=None,
                                         skip_register=True)
        else:
            syft_obj = previous_pointer
    return syft_obj
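# A minimal usage sketch, not from the original source: how deser() might be
# invoked under the PySyft 0.1.x API (the worker id, the message values, and
# the import path are assumptions for illustration).
import syft as sy
from syft.core.workers import VirtualWorker

me = VirtualWorker(id="me")
msg_obj = {'owner': 'bob', 'id': 1234, 'torch_type': 'syft.FloatTensor'}

# acquire=False: we don't own the data, so we expect a pointer to it
syft_obj = sy._LocalTensor.deser(msg_obj, me, acquire=False)
assert isinstance(syft_obj, sy._PointerTensor)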
def _send_msg(self, message_wrapper_json_binary, recipient):
    """Sends a binary-encoded message to another worker with message_type
    information indicating how the message should be processed.

    :Parameters:

    * **message_wrapper_json_binary (binary)** the message being sent,
      encoded in binary.

    * **recipient (** :class:`VirtualWorker` **)** the worker being sent a
      message.

    * **out (object)** the response from the message being sent. This can be
      a variety of object types. However, the object is typically only used
      during testing or local development with :class:`VirtualWorker`
      workers.
    """
    return sy._LocalTensor()
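# A minimal sketch of a concrete override, assuming a BaseWorker-style parent
# class and a receive_msg() entry point on the recipient (both assumptions;
# this is not the real PySyft transport code). An in-process "virtual"
# transport simply hands the binary payload straight to the recipient.
class InProcessWorker(BaseWorker):
    def _send_msg(self, message_wrapper_json_binary, recipient):
        # Deliver the bytes directly and return the recipient's response
        return recipient.receive_msg(message_wrapper_json_binary)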
def test_encode_SyftTensor(self):
    # Given
    owner = VirtualWorker(id="me")
    obj = sy._LocalTensor(owner=owner, id="id",
                          torch_type="syft.FloatTensor")
    expected = {
        "mode": "subscribe",
        "obj": {
            "___LocalTensor__": {
                "id": "id",
                "owner": "me",
                "torch_type": "syft.FloatTensor",
            }
        },
    }

    # When
    result = self.cut.encode(obj)

    # Then
    self.assertEqual(expected, result)
def handle_call(cls, syft_command, owner):
    """
    Execute a forwarded command on the native tensor with native operations.

    Receives a syft command and an owner, converts it into a command with
    native torch args, executes the native operations, and converts the
    result back into a syft response using _LocalTensors.
    """
    tensor_command, torch_type = torch_utils.prepare_child_command(
        syft_command, replace_tensorvar_with_child=True)
    torch_utils.assert_has_only_torch_tensorvars(tensor_command)

    attr = tensor_command['command']
    args = tensor_command['args']
    kwargs = tensor_command['kwargs']
    has_self = tensor_command['has_self']

    if has_self:
        self = tensor_command['self']
        attr = torch._command_guard(attr, torch.tensorvar_methods)
        command = getattr(self, "native_" + attr)
    else:
        attr = torch._command_guard(attr, torch.torch_modules)
        elems = attr.split('.')
        elems[-1] = 'native_' + elems[-1]
        native_func_name = '.'.join(elems)
        command = eval(native_func_name)

    response = command(*args, **kwargs)

    # TODO : control registration process
    if response is None:
        return response

    if owner.id != owner.hook.local_worker.id:
        if isinstance(response, (int, float, bool)):
            response = sy.zeros(1) + response
        elif isinstance(response, (np.ndarray, )):
            response = sy.FloatTensor(response)
    else:
        if isinstance(response, (int, float, bool, np.ndarray)):
            return response

    # If the command is an in-place method, wrap self and return
    if has_self and utils.is_in_place_method(attr):
        # Wrap the main element
        torch_utils.wrap_command_with(response, syft_command['self'])

        if torch_utils.is_variable(response):
            # Also wrap the data if it's a variable (don't use
            # wrap_command_with: the chain is not well formed yet)
            syft_command['self'].child.data = response.data
            response.data.parent = syft_command['self'].child.data.parent

            # And wrap the grad if there is one
            if response.grad is not None:
                if response.grad.data.dim() > 0:
                    syft_command['self'].child.grad = response.grad
                else:
                    syft_command['self'].child.grad.native_set_()
                response.grad.parent = syft_command['self'].child.grad.parent

            # Finally, fix the .data and .grad links
            if response.grad is None:
                torch_utils.link_var_chain_to_data_chain(
                    syft_command['self'], response.data.child)
            else:
                torch_utils.link_var_chain_to_data_and_grad_chains(
                    syft_command['self'], response.data.child,
                    response.grad.child)

        return_response = syft_command['self']
    # Else, the response is not self. Iterate over the response(s) and wrap
    # each one with a syft tensor
    else:
        responses = response if isinstance(response, tuple) else (response, )
        syft_responses = []
        for resp in responses:
            if resp is None:  # Don't wrap None
                syft_responses.append(resp)
                continue

            if isinstance(resp, (int, float, bool)):
                # If not the final worker, convert into a FloatTensor, which
                # comes with a _LocalTensor
                if owner.id != owner.hook.local_worker.id:
                    resp = sy.zeros(1) + resp
                else:  # Else don't wrap it
                    syft_responses.append(resp)
                    continue

            syft_response = sy._LocalTensor(
                child=resp,
                parent=resp,
                owner=owner,
                torch_type='syft.' + type(resp).__name__)

            if torch_utils.is_variable(resp):
                if resp.grad is None:
                    torch_utils.link_var_chain_to_data_chain(
                        syft_response, resp.data.child)
                else:
                    torch_utils.link_var_chain_to_data_and_grad_chains(
                        syft_response, resp.data.child, resp.grad.child)

            syft_responses.append(syft_response)

        return_response = tuple(syft_responses) if len(
            syft_responses) > 1 else syft_responses[0]

    return return_response
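# For reference, a sketch of the syft_command dict that handle_call consumes.
# The tensors and the commented-out call are illustrative assumptions, not
# taken from the original source.
import syft as sy

x = sy.FloatTensor([1., 2.])
y = sy.FloatTensor([3., 4.])

syft_command = {
    'command': 'add',    # name of the torch method/function to forward
    'has_self': True,    # True for method calls such as x.add(y)
    'self': x,           # receiver of the method call (only if has_self)
    'args': (y,),        # positional arguments
    'kwargs': {},        # keyword arguments
}
# response = sy._LocalTensor.handle_call(syft_command, owner=x.owner)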