Example #1
    def run_remote_inference(self, model_id, data):
        """ Run a dataset inference using a remote model.

        Args:
            model_id (str) : Model ID.
            data (Tensor) : dataset to be inferred.
        Returns:
            inference (Tensor) : Inference result
        Raises:
            RuntimeError : If an unexpected behavior happen.
        """
        serialized_data = serialize(data).decode(self.encoding)
        message = {
            REQUEST_MSG.TYPE_FIELD: REQUEST_MSG.RUN_INFERENCE,
            "model_id": model_id,
            "data": serialized_data,
            "encoding": self.encoding,
        }
        response = self._forward_json_to_websocket_server_worker(message)
        return self._return_bool_result(response,
                                        RESPONSE_MSG.INFERENCE_RESULT)
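A hypothetical call site for run_remote_inference; the client class, import path, node address, and served model id below are assumptions, not taken from the snippet:

import torch
import syft as sy
from syft.grid.clients.data_centric_fl_client import DataCentricFLClient

hook = sy.TorchHook(torch)
client = DataCentricFLClient(hook, "ws://localhost:5000")

# Assumes a model was previously served under the id "mnist"
sample = torch.zeros(1, 1, 28, 28)
prediction = client.run_remote_inference("mnist", sample)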
Example #2
    def recv_msg(self, bin_message):
        """Implements the logic to receive messages.

        The binary message is deserialized and routed to the appropriate
        function. And, the response serialized the returned back.

        Every message uses this method.

        Args:
            bin_message: A binary serialized message.

        Returns:
            A binary message response.
        """

        # Step 1: save the message if logging is enabled
        if self.log_msgs:
            self.msg_history.append(bin_message)

        # Step 2: deserialize the message
        (msg_type, contents) = serde.deserialize(bin_message, worker=self)
        if self.verbose:
            print(f"worker {self} received {msg_type} {contents}")

        # Step 3: route the message to the appropriate handler
        response = self._message_router[msg_type](contents)

        # Step 4: serialize the response back to binary
        bin_response = serde.serialize(response)
        return bin_response
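The dispatch above relies on a _message_router dict that maps message types to bound handler methods. A minimal, self-contained sketch of that pattern follows; the message type, the handler, and the use of msgpack in place of PySyft's serde are all illustrative:

import msgpack  # stands in for serde in this sketch

class ToyWorker:
    def __init__(self):
        # Route table: message type -> handler method
        self._message_router = {"ping": self._handle_ping}

    def _handle_ping(self, contents):
        return {"pong": contents}

    def recv_msg(self, bin_message):
        # Deserialize, route to the handler, serialize the response
        msg_type, contents = msgpack.unpackb(bin_message)
        response = self._message_router[msg_type](contents)
        return msgpack.packb(response)

worker = ToyWorker()
reply = worker.recv_msg(msgpack.packb(("ping", "hello")))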
Example #3
    def serve_model(
        self,
        model,
        model_id: str = None,
        mpc: bool = False,
        allow_download: bool = False,
        allow_remote_inference: bool = False,
    ):
        """Hosts the model and optionally serve it using a Socket / Rest API.
        Args:
            model : A jit model or Syft Plan.
            model_id (str): An integer/string representing the model id.
            If it isn't provided and the model is a Plan we use model.id,
            if the model is a jit model we raise an exception.
            allow_download (bool) : Allow to copy the model to run it locally.
            allow_remote_inference (bool) : Allow to run remote inferences.
        Returns:
            result (bool) : True if model was served sucessfully.
        Raises:
            ValueError: model_id isn't provided and model is a jit model.
            RunTimeError: if there was a problem during model serving.
        """

        # If the model is a Plan, send it and host the
        # pointer plan returned by the send action
        if isinstance(model, Plan):
            # We need to use the same id in the model
            # as in the POST request.
            pointer_model = model.send(self)
            res_model = pointer_model
        else:
            res_model = model

        serialized_model = serialize(res_model).decode(self.encoding)

        message = {
            REQUEST_MSG.TYPE_FIELD: REQUEST_MSG.HOST_MODEL,
            "encoding": self.encoding,
            "model_id": model_id,
            "allow_download": str(allow_download),
            "mpc": str(mpc),
            "allow_remote_inference": str(allow_remote_inference),
            "model": serialized_model,
        }

        url = self.address.replace("ws", "http") + "/data_centric/serve-model/"

        # Multipart encoding
        form = MultipartEncoder(message)
        upload_size = form.len

        # Callback that shows upload progress
        def progress_callback(monitor):
            upload_progress = "{} / {} ({:.2f} %)".format(
                monitor.bytes_read, upload_size,
                (monitor.bytes_read / upload_size) * 100)
            print(upload_progress, end="\r")
            if monitor.bytes_read == upload_size:
                print()

        monitor = MultipartEncoderMonitor(form, progress_callback)
        headers = {
            "Prefer": "respond-async",
            "Content-Type": monitor.content_type
        }

        session = requests.Session()
        response = session.post(url, headers=headers, data=monitor).content
        session.close()
        return self._return_bool_result(json.loads(response))
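A hypothetical end-to-end usage of serve_model, assuming PySyft 0.2.x conventions; the client class, import path, node address, and model id are assumptions:

import torch as th
import syft as sy
from syft.grid.clients.data_centric_fl_client import DataCentricFLClient

hook = sy.TorchHook(th)

# Toy Plan built with PySyft's func2plan decorator (0.2.x convention)
@sy.func2plan(args_shape=[(1,)])
def double(x):
    return x + x

client = DataCentricFLClient(hook, "ws://localhost:5000")
ok = client.serve_model(double, model_id="double",
                        allow_download=True,
                        allow_remote_inference=True)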
Example #4
    def _set_obj(self, obj: Union[FrameworkTensorType, AbstractTensor]) -> None:
        """Registers the object in the local store and mirrors it in Redis."""
        self._objects[obj.id] = obj
        redis_db.hset(self.id, obj.id, serialize(obj))
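A hypothetical read-side counterpart: the _get_obj name, the deserialize helper, and the fall-back-to-Redis behavior are assumptions, with redis_db being the same redis.Redis handle used above:

    def _get_obj(self, obj_id):
        """Fetches an object from memory, falling back to the Redis mirror."""
        obj = self._objects.get(obj_id)
        if obj is None:
            raw = redis_db.hget(self.id, obj_id)  # bytes or None
            if raw is not None:
                obj = deserialize(raw)
                self._objects[obj_id] = obj  # re-cache in memory
        return obj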
Example #5
def test_torch_Tensor(compress):
    t = Tensor(numpy.random.random((100, 100)))
    t_serialized = serialize(t, compress=compress)
    t_serialized_deserialized = deserialize(t_serialized, compressed=compress)
    assert (t == t_serialized_deserialized).all()
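As shown, the test lacks its imports and the source of the compress flag; a self-contained version would presumably look like this (the serde import path and the parameter values are assumptions):

import numpy
import pytest
from torch import Tensor
from syft.serde import deserialize, serialize  # import path assumed

@pytest.mark.parametrize("compress", [True, False])
def test_torch_Tensor(compress):
    # Round-trip a random tensor through serialization and verify
    # elementwise equality.
    t = Tensor(numpy.random.random((100, 100)))
    t_serialized = serialize(t, compress=compress)
    t_serialized_deserialized = deserialize(t_serialized, compressed=compress)
    assert (t == t_serialized_deserialized).all()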
Example #6
def test_invalid_compression_scheme(compress_scheme):
    arr = numpy.random.random((100, 100))
    with pytest.raises(CompressionNotFoundException):
        _ = serialize(arr, compress=True, compress_scheme=compress_scheme)
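Likewise, compress_scheme is presumably parametrized with ids that fall outside the registered compression schemes, so serialize raises CompressionNotFoundException; the values here are assumptions:

import pytest

# Scheme ids assumed not to match any registered compression scheme
@pytest.mark.parametrize("compress_scheme", [2, 3, 99])
def test_invalid_compression_scheme(compress_scheme):
    ...  # body as in Example #6 above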