def receive_msg(self, message_wrapper_json):
    """Receive a binary-encoded message from another worker, execute its
    contents, and return the encoded response.

    * **message_wrapper_json (binary)** the incoming encoded message

    * **out (object)** the encoded response. This can be a variety of
      object types; it is typically only used during testing or local
      development with :class:`VirtualWorker` workers.
    """
    # Turn the blob into a dict in which all contained objects have
    # been deserialized.
    decoded = self.decode_msg(message_wrapper_json)
    message_wrapper = encode.decode(decoded, worker=self)

    # Route the message to the appropriate logic and execute it.
    # `is_private` (bool) tells us whether to intentionally leave the
    # data out of the response and send pointers instead, keeping the
    # actual data local.
    result, is_private = self.process_message_type(message_wrapper)

    # Recursively serialize any objects in the result into their
    # string/dictionary form, then wrap for transport.
    serialized = encode.encode(
        result, retrieve_pointers=False, private_local=is_private
    )
    return self.encode_msg(serialized)
def test_encode_decode_json_python(self):
    """Round-trip python objects through our JSON encoder/JSONDecoder.

    The main focus is on objects JSON cannot natively represent, such as
    a torch Variable, tuple, or even slice().
    """
    x = Var(torch.FloatTensor([[1, -1], [0, 1]]))
    x.send(bob)

    original = [None, ({'marcel': (1, [1.3], x), 'proust': slice(0, 2, None)}, 3)]

    # First pass: encode -> JSON string -> decode.
    encoded, _ = encode.encode(original)
    first = encode.decode(json.dumps(encoded), me)

    # Second pass over the decoded result must reproduce it exactly
    # (the round trip is a fixed point).
    encoded, _ = encode.encode(first)
    second = encode.decode(json.dumps(encoded), me)

    assert first == second
def compile_command(attr, args, kwargs, has_self=False, self=None):
    """Return the JSON-serializable encoded command, the location which
    should receive it, and the owners of the pointers seen in the command.

    Used in the _PointerTensor handlecall to prepare the command before
    forwarding it to a remote worker.
    """
    command = {
        "command": attr,
        "has_self": has_self,
        "args": args,
        "kwargs": kwargs,
    }
    if has_self:
        command["self"] = self

    command, pointers = encode.encode(command, retrieve_pointers=True)

    # Derive destination and ownership information from the pointers
    # discovered during encoding.
    if not pointers:
        return command, [], []

    if len(pointers) == 1:
        sole = pointers[0]
        return command, [sole.location], [sole.owner]

    # Multiple pointers: they must all agree on location and owner.
    locations = list({pointer.location for pointer in pointers})
    owners = list({pointer.owner for pointer in pointers})
    if len(locations) > 1:
        raise NotImplementedError(
            "All pointers should point to the same worker")
    if len(owners) > 1:
        raise NotImplementedError(
            "All pointers should share the same owner.")
    return command, locations, owners
def send_obj(
    self,
    object,
    new_id,
    recipient,
    new_data_id=None,
    new_grad_id=None,
    new_grad_data_id=None,
):
    """Send an object to another :class:`VirtualWorker` and remove it from
    the local worker.

    :Parameters:

    * **object (object)** a python object to be sent

    * **new_id (int)** the id where the object should be stored

    * **recipient (** :class:`VirtualWorker` **)** the worker object to
      send the message to.

    * **new_data_id / new_grad_id / new_grad_data_id (int, optional)**
      ids for the .data, .grad and .grad.data chains; required when the
      object is a torch Variable.

    :raises AttributeError: if the object is a Variable and any of the
        auxiliary ids is missing.
    :raises AssertionError: if any two of the supplied ids collide.
    :raises MemoryError: if a pointer at the target id already exists, or
        the recipient is the same worker as the sender.
    """
    # if the object is a torch object, run some special checks, otherwise
    # just set the ID
    if hasattr(object, "child"):
        object.child.id = new_id
        if torch_utils.is_variable_name(object.child.torch_type):
            if (
                new_data_id is None
                or new_grad_id is None
                or new_grad_data_id is None
            ):
                raise AttributeError(
                    (
                        "Please provide the new_data_id, new_grad_id, and "
                        "new_grad_data_id args, to be able to point to Var.data, .grad"
                    )
                )

            if self.get_pointer_to(recipient, new_data_id) is not None:
                raise MemoryError("You already point at ", recipient, ":", new_id)

            # Validate pairwise id uniqueness with explicit raises rather
            # than `assert`, so the checks survive `python -O` (asserts
            # are stripped under optimization). Same exception type and
            # messages as before.
            err_msg = "You can't have the same id for {} and {}."
            id_pairs = (
                (new_id, new_data_id, "var", "var.data"),
                (new_id, new_grad_id, "var", "var.grad"),
                (new_id, new_grad_data_id, "var", "var.grad.data"),
                (new_data_id, new_grad_id, "var.data", "var.grad"),
                (new_data_id, new_grad_data_id, "var.data", "var.grad.data"),
                (new_grad_id, new_grad_data_id, "var.grad", "var.grad.data"),
            )
            for left, right, left_name, right_name in id_pairs:
                if left == right:
                    raise AssertionError(err_msg.format(left_name, right_name))

            object.data.child.id = new_data_id
            # Lazily create the grad chain if it does not exist yet.
            if object.grad is None:
                object.init_grad_()
            object.grad.child.id = new_grad_id
            object.grad.data.child.id = new_grad_data_id
    else:
        object.id = new_id

    if self.get_pointer_to(recipient, new_id) is not None:
        raise MemoryError("You already point at ", recipient, ":", new_id)

    if self is recipient:
        raise MemoryError(
            (
                "The recipient {} is the same as the owner {} of the object {}"
                "that you are trying to send"
            ).format(recipient, self, object.id)
        )

    object = encode.encode(object, retrieve_pointers=False, private_local=False)

    # We don't need any response to proceed to registration
    self.send_msg(message=object, message_type="obj", recipient=recipient)
def process_message_type(self, message_wrapper):
    """Take a message wrapper and attempt to process it against the known
    processing methods. If the method is a composite message, it is
    unrolled and applied recursively.

    * **message_wrapper (dict)** Dictionary containing the message and
      meta information; ``message_wrapper['type']`` selects the handler
      and ``message_wrapper['message']`` holds the payload.

    * **out (object, bool)** the response. This can be a variety of object
      types. However, the object is typically only used during testing or
      local development with :class:`VirtualWorker` workers. The bool
      specifies if the response is private or not (private: we don't
      encode the data but just info on the tensor; not private: we
      transmit data to be acquired by the receiver).
    """
    # the contents of the message
    message = message_wrapper["message"]

    # this series of if/else statements uses the message_wrapper['type']
    # value to determine where to route the incoming message.

    # if the message contains an object being sent to us
    if message_wrapper["type"] == "obj":
        object = message

        # numpy arrays need no pre-processing; torch objects must have
        # their chain structure fixed before registration
        if not isinstance(message, np.ndarray):
            torch_utils.fix_chain_structure(object)

        # register the object, saving it in self._objects and ensuring that
        # object.owner is set correctly
        self.register(object)

        # we do not send a response back
        # TODO: send a "successful" or "not successful" response?
        return {}, False

    # a request for an object to be sent to another worker. For example
    # "x.get()" would execute here, if x is a pointer to an object hosted
    # on this worker.
    elif message_wrapper["type"] == "req_obj":
        # Because it was pointed at, it's the first syft_object of the
        # chain, so its parent is the tensorvar
        object = self.get_obj(message)

        # a numpy array is simply deregistered and returned whole
        if isinstance(object, np.ndarray):
            self.de_register(object)
            return object, False

        # torch object: take the object's parent and return the entire
        # object — all children will be serialized recursively
        tensorvar = object.parent

        # if the object is a variable, make special considerations to
        # ensure the data and grad chains are all properly deregistered
        if torch_utils.is_variable_name(object.torch_type):
            self.de_register(tensorvar.data.child)
            if tensorvar.grad is not None:
                self.de_register(tensorvar.grad.child)
                self.de_register(tensorvar.grad.data.child)

        # deregister the object itself
        self.de_register(object)

        # False means we actually return the data (it's not private)
        return tensorvar, False

    # A torch command from another worker involving one or more tensors
    # hosted locally. For example: "z = x + y" would execute here.
    elif message_wrapper["type"] == "torch_cmd":
        # route the command to the torch command logic
        result = self.process_torch_command(message)

        # save the results locally in self._objects
        self.register(result)

        # Result is private - so only actually return a pointer to the result
        return result, True

    # a numpy command from another worker involving one or more numpy
    # arrays hosted locally. For example "z = x + y" would execute here.
    elif message_wrapper["type"] == "numpy_cmd":
        # route the command to the numpy command logic
        result = self.process_numpy_command(message)

        # save the result locally in self._objects and ensure that
        # .owner is set correctly
        self.register(result)

        # Result is private - so only actually return a pointer to result
        return result, True

    # A composite command. Must be unrolled
    elif message_wrapper["type"] == "composite":
        raise NotImplementedError(
            "Composite command not handled at the moment")

    # a message asking for a list of tensors which fit a certain criteria.
    # at the time of writing this comment, this is a partial string match
    # on the id of the tensor. For example, if self._workers has a tensor
    # with an id "12345 #boston_housing #input" then a query from a pointer
    # to this worker of bob.search("#boston_housing") would return a list
    # of pointers including the one with that id.
    elif message_wrapper["type"] == "query":
        # perform the search over all tensors on the worker
        tensors = self.search(message)

        # convert the resulting tensors to serialized pointers
        pointers = []
        for tensor in tensors:
            # initialize a pointer to the tensor
            ptr = tensor.parent.create_pointer()
            # NOTE(review): with retrieve_pointers=True, encode.encode
            # returns a (encoding, pointers) pair elsewhere in this file
            # (see compile_command) — confirm the pair is intended here.
            encoding = encode.encode(
                ptr, private_local=False, retrieve_pointers=True)
            pointers.append(encoding)

        # return the list of pointers
        return pointers, True

    # Unrecognized type. Return a (response, private) pair — callers such
    # as receive_msg unpack two values, so a bare string would crash with
    # a ValueError instead of surfacing this diagnostic.
    return "Unrecognized message type:" + message_wrapper["type"], False